import base64


def base64_encode(string: str) -> bytes:
    """Encode a UTF-8 string as Base64 and return the encoded bytes."""
    return base64.b64encode(string.encode('utf-8'))


def base64_decode(encoded_data: bytes) -> str:
    """Decode Base64 bytes back into a UTF-8 string."""
    return base64.b64decode(encoded_data).decode('utf-8')


if __name__ == "__main__":
    test = 'Hello World!'
    encoded = base64_encode(test)
    print(encoded)

    decoded = base64_decode(encoded)
    print(decoded)
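# Usage sketch (added for illustration; not part of the original module):
# `base64.b64encode` operates on bytes and returns bytes, which is why the
# helpers above encode/decode through UTF-8 explicitly. For tokens embedded in
# URLs, the standard library also offers a URL-safe alphabet:
#
#     import base64
#     token = base64.urlsafe_b64encode('a/b+c'.encode('utf-8'))
#     assert base64.urlsafe_b64decode(token).decode('utf-8') == 'a/b+c'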
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True,
        use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        prediction_scores = model(inputs)['logits']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_case_name == "TextGenerationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base')
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
import warnings

from ..trainer import Trainer
from ..utils import logging


logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            '`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '
            'instead.',
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """
    This class represents a vector of arbitrary size.
    """

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception('must have the same size')

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception('must have the same size')

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception('invalid operand!')

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception('index out of range')

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception('Vector is empty')
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    # precondition
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    # precondition
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    # precondition
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """
    This class represents an arbitrary matrix.
    """

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ''
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception('matrix must have the same dimension!')

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception('matrices must have the same dimension!')

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    'vector must have the same size as the '
                    'number of columns of the matrix!')
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception('change_component: indices out of bounds')

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception('change_component: indices out of bounds')

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception('Indices out of bounds')

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if self.__height < 1:
            raise Exception('Matrix has no element')
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
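# Usage sketch (added for illustration; the names follow the definitions above):
if __name__ == "__main__":
    v = Vector([1, 2, 3])
    w = Vector([4, 5, 6])
    print(v + w)            # prints (5,7,9)
    print(v * w)            # dot product: 32
    m = Matrix([[1, 2], [3, 4]], 2, 2)
    print(m.determinant())  # 1*4 - 2*3 = -2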
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    'configuration_rag': ['RagConfig'],
    'retrieval_rag': ['RagRetriever'],
    'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_rag'] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_rag'] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
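# Note (added): the `_LazyModule` indirection above defers the heavy torch/TF
# imports until an attribute is actually accessed. Callers just write a normal
# top-level import, which resolves through `_import_structure` on first use:
#
#     from transformers import RagTokenizer, RagRetriever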
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'spm_file': 'sentencepiece.bpe.model',
    'tokenizer_config_file': 'tokenizer_config.json',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
        'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
    },
    'spm_file': {
        'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
        'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
    },
    'tokenizer_config_file': {
        'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
        'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/m2m100_418M': 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
    'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
# fmt: on
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token='<s>', eos_token='</s>',
        sep_token='</s>', pad_token='<pad>', unk_token='<unk>', language_codes='m2m100',
        sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}

        kwargs['additional_special_tokens'] = kwargs.get('additional_special_tokens', [])
        kwargs['additional_special_tokens'] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs['additional_special_tokens']
        ]

        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else 'en'
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f'{save_directory} should be a directory')
        vocab_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = 'en', tgt_texts: Optional[List[str]] = None, tgt_lang: str = 'ro', **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, 'r') as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, 'w') as f:
        json.dump(data, f, indent=2)
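# Usage sketch (added; downloading the checkpoint requires network access):
#
#     from transformers import M2M100Tokenizer
#
#     tokenizer = M2M100Tokenizer.from_pretrained(
#         'facebook/m2m100_418M', src_lang='en', tgt_lang='fr'
#     )
#     batch = tokenizer('Hello world', return_tensors='pt')
#     # per set_src_lang_special_tokens above, the source text is encoded as
#     # __en__ <tokens> </s>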
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the candidate plaintext for every key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f'Decryption using Key #{key}: {translated}')


def main() -> None:
    message = input('Encrypted message: ')
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
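# Example (added): the loop above undoes a shift of `key`, so a message
# enciphered with key 3 ("HELLO" -> "KHOOR") is recovered on the matching line:
#
#     decrypt('KHOOR')
#     # ...
#     # Decryption using Key #3: HELLO
#     # ...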
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original block number."""
    to_find = original_name.split('.')[0]
    key_list = key.split('.')
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f'{orig_block_num}.{layer_num}.{original_name}', f'block.{new_block_num}.{layer_num}.{new_name}')
    return key
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('network'):
            key = key.replace('network', 'poolformer.encoder')
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('bias') and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('proj')]
            key = key.replace(to_replace, f'patch_embeddings.{total_embed_found}.')
            key = key.replace('proj', 'projection')
            if key.endswith('bias'):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = 'poolformer.encoder.' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc1', 'output.conv1')
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc2', 'output.conv2')
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm1', 'before_norm')
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm2', 'after_norm')
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_1', 'layer_scale_1')
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_2', 'layer_scale_2')
        if "head" in key:
            key = key.replace('head', 'classifier')
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    size = model_name[-3:]
    config.num_labels = 1000
    filename = 'imagenet-1k-id2label.json'
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f'Size {size} not supported')

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values

    logger.info(f'Converting model {model_name}...')

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors='pt').pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f'Size {size} not supported')

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--model_name',
        default='poolformer_s12',
        type=str,
        help='Name of the model you\'d like to convert.',
    )
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
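# Invocation sketch (added): with the original .pth checkpoint downloaded
# locally, the conversion is driven through the CLI defined above, e.g.:
#
#     python convert_poolformer_checkpoint.py \
#         --model_name poolformer_s12 \
#         --checkpoint_path /path/to/poolformer_s12.pth \
#         --pytorch_dump_folder_path ./poolformer_s12_hf
#
# (the script filename is illustrative; use whatever this file is saved as)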
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self, vocab_size=50400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None,
        activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5,
        initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, 'pad_token_id', None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
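# Usage sketch (added): `attribute_map` lets the GPT-J-specific names be read
# through the generic config attributes, which model-agnostic code relies on:
#
#     config = GPTJConfig(n_embd=2048, n_layer=16)
#     assert config.hidden_size == 2048       # resolved to n_embd via attribute_map
#     assert config.num_hidden_layers == 16   # resolved to n_layer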
from math import asin, atan, cos, radians, sin, sqrt, tan

# CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the great-circle distance in metres between two points on a sphere,
    given their latitudes and longitudes in degrees
    (https://en.wikipedia.org/wiki/Haversine_formula).
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
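# Usage sketch (added; the coordinates are illustrative):
#
#     SAN_FRANCISCO = (37.774856, -122.424227)
#     YOSEMITE = (37.864742, -119.537521)
#     print(haversine_distance(*SAN_FRANCISCO, *YOSEMITE))  # distance in metres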
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])

    metric = evaluate.load('glue', 'mrpc')

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch['labels']))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
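# Launch sketch (added): like the other accelerate examples, this script is
# meant to be started through the accelerate CLI after running
# `accelerate config`, e.g.:
#
#     accelerate launch this_script.py --mixed_precision fp16
#
# (the filename is an assumption; substitute whatever this script is saved as)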
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR: 1 when both inputs are equal, else 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
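# Truth-table sketch (added): the equality formulation makes it easy to
# enumerate every input pair with the standard library:
#
#     from itertools import product
#     for a, b in product((0, 1), repeat=2):
#         print(a, b, xnor_gate(a, b))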
import pytest

import datasets


# Import fixture modules as plugins
pytest_plugins = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit']):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line('markers', 'torchaudio_latest: mark test to run with torchaudio>=0.12')


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE', str(test_hf_datasets_cache))
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE', str(test_hf_metrics_cache))
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE', str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH', str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH', str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope='session')
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS', False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING', True)
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Iterate through the list from both ends to find the index of the key.

    >>> search(list(range(0, 11)), 5)
    5
    >>> search([1, 2, 4, 5, 3], 4)
    2
    >>> search([1, 2, 4, 5, 3], 6)
    -1
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True,
        use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=128, max_relative_position=32, type_vocab_size=16, type_sequence_label_size=2,
        initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        # NOTE: `max_relative_position` is a best guess for the one constructor default (32) that the
        # original body never stores on `self`; all other names follow from the attribute uses below.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
def _a ( self ) -> str:
        config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels =self.prepare_config_and_inputs()
        config.is_decoder =True
        encoder_hidden_states =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Dict:
__UpperCamelCase =NezhaModel(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , attention_mask=A_ , token_type_ids=A_ )
__UpperCamelCase =model(A_ , token_type_ids=A_ )
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =True
__UpperCamelCase =NezhaModel(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(
A_ , attention_mask=A_ , token_type_ids=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , )
__UpperCamelCase =model(
A_ , attention_mask=A_ , token_type_ids=A_ , encoder_hidden_states=A_ , )
__UpperCamelCase =model(A_ , attention_mask=A_ , token_type_ids=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Tuple:
__UpperCamelCase =NezhaForMaskedLM(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =NezhaForNextSentencePrediction(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
__UpperCamelCase =NezhaForPreTraining(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , next_sentence_label=A_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Dict:
__UpperCamelCase =NezhaForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(
A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Tuple:
__UpperCamelCase =self.num_labels
__UpperCamelCase =NezhaForSequenceClassification(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =NezhaForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> List[Any]:
__UpperCamelCase =self.num_choices
__UpperCamelCase =NezhaForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase =model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self ) -> str:
        config_and_inputs =self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels =config_and_inputs
        inputs_dict ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Dict = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Dict = True
def _a ( self , A_ , A_ , A_=False ) -> Optional[Any]:
__UpperCamelCase =super()._prepare_for_class(A_ , A_ , return_labels=A_ )
if return_labels:
if model_class in get_values(A_ ):
__UpperCamelCase =torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A_ )
__UpperCamelCase =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A_ )
return inputs_dict
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =NezhaModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , hidden_size=37 )
def _a ( self ) -> int:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A_ )
def _a ( self ) -> List[str]:
# This regression test was failing with PyTorch < 1.3
        config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask =self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask =None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*A_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
def _a ( self ) -> Dict:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _a ( self ) -> Union[str, Any]:
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =NezhaModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@slow
@require_torch_gpu
def _a ( self ) -> Tuple:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
__UpperCamelCase =True
__UpperCamelCase =model_class(config=A_ )
__UpperCamelCase =self._prepare_for_class(A_ , A_ )
__UpperCamelCase =torch.jit.trace(
A_ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(A_ , os.path.join(A_ , 'bert.pt' ) )
__UpperCamelCase =torch.jit.load(os.path.join(A_ , 'bert.pt' ) , map_location=A_ )
loaded(inputs_dict['input_ids'].to(A_ ) , inputs_dict['attention_mask'].to(A_ ) )
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> Optional[int]:
__UpperCamelCase =NezhaModel.from_pretrained('sijunhe/nezha-cn-base' )
__UpperCamelCase =torch.tensor([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase =torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__UpperCamelCase =model(A_ , attention_mask=A_ )[0]
__UpperCamelCase =torch.Size((1, 6, 768) )
self.assertEqual(output.shape , A_ )
__UpperCamelCase =torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A_ , atol=1E-4 ) )
@slow
def _a ( self ) -> Any:
__UpperCamelCase =NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' )
__UpperCamelCase =torch.tensor([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase =torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__UpperCamelCase =model(A_ , attention_mask=A_ )[0]
__UpperCamelCase =torch.Size((1, 6, 21128) )
self.assertEqual(output.shape , A_ )
__UpperCamelCase =torch.tensor(
[[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A_ , atol=1E-4 ) )
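# A hedged inference sketch mirroring the integration tests above; it assumes
# network access to the same 'sijunhe/nezha-cn-base' checkpoint and is not
# part of the test suite itself.
import torch
from transformers import NezhaModel

def run_nezha_smoke_check():
    model = NezhaModel.from_pretrained('sijunhe/nezha-cn-base')
    input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
    attention_mask = torch.ones_like(input_ids)
    with torch.no_grad():
        # index [0] is last_hidden_state; the base checkpoint uses hidden size 768
        hidden = model(input_ids, attention_mask=attention_mask)[0]
    assert hidden.shape == (1, 6, 768)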
| 710 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , ) -> List[Any]:
__UpperCamelCase =size if size is not None else {'height': 18, 'width': 18}
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =num_channels
__UpperCamelCase =image_size
__UpperCamelCase =min_resolution
__UpperCamelCase =max_resolution
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =apply_ocr
def _a ( self ) -> Tuple:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =LayoutLMvaImageProcessingTester(self )
@property
def _a ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'apply_ocr' ) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def _a ( self ) -> Dict:
pass
def _a ( self ) -> Optional[Any]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , A_ )
self.assertIsInstance(encoding.boxes , A_ )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> int:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> List[str]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> Any:
# with apply_OCR = True
__UpperCamelCase =LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__UpperCamelCase =Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A_ )
self.assertListEqual(encoding.boxes , A_ )
# with apply_OCR = False
__UpperCamelCase =LayoutLMvaImageProcessor(apply_ocr=A_ )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
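# A short usage sketch for the processor exercised above, using the class name
# as imported at the top of this file. With apply_ocr=True the call returns
# OCR words and boxes alongside pixel values; 'document.png' is a placeholder
# path and Tesseract must be installed for the OCR step.
from PIL import Image

def ocr_one_page(path='document.png'):
    processor = LayoutLMvaImageProcessor(apply_ocr=True)
    image = Image.open(path).convert('RGB')
    encoding = processor(image, return_tensors='pt')
    # pixel_values is resized to 224x224; words/boxes come from Tesseract
    return encoding.pixel_values.shape, encoding.words, encoding.boxes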
| 682 | 0 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object
    def Body(*args : int , **kwargs : Optional[Any] ):
        pass
    _serve_dependencies_installed = False
logger = logging.get_logger('transformers-cli/serving')
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Namespace ):
__UpperCamelCase =pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(SCREAMING_SNAKE_CASE__ , args.host , args.port , args.workers )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : dict
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : List[str]
UpperCAmelCase__ : Optional[List[int]]
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : str
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Any
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
@staticmethod
def _a ( A_ ) -> Union[str, Any]:
__UpperCamelCase =parser.add_parser(
'serve' , help='CLI tool to run inference requests through REST and GraphQL endpoints.' )
serve_parser.add_argument(
'--task' , type=A_ , choices=get_supported_tasks() , help='The task to run the pipeline on' , )
serve_parser.add_argument('--host' , type=A_ , default='localhost' , help='Interface the server will listen on.' )
serve_parser.add_argument('--port' , type=A_ , default=8888 , help='Port the serving will listen to.' )
serve_parser.add_argument('--workers' , type=A_ , default=1 , help='Number of http workers' )
serve_parser.add_argument('--model' , type=A_ , help='Model\'s name or path to stored model.' )
serve_parser.add_argument('--config' , type=A_ , help='Model\'s config name or path to stored model.' )
serve_parser.add_argument('--tokenizer' , type=A_ , help='Tokenizer name to use.' )
serve_parser.add_argument(
'--device' , type=A_ , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
serve_parser.set_defaults(func=A_ )
def __init__( self , A_ , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =pipeline
__UpperCamelCase =host
__UpperCamelCase =port
__UpperCamelCase =workers
if not _serve_dependencies_installed:
raise RuntimeError(
'Using serve command requires FastAPI and uvicorn. '
                'Please install transformers with [serving]: pip install "transformers[serving]". '
'Or install FastAPI and uvicorn separately.' )
else:
logger.info(f'Serving model over {host}:{port}' )
__UpperCamelCase =FastAPI(
routes=[
APIRoute(
'/' , self.model_info , response_model=A_ , response_class=A_ , methods=['GET'] , ),
APIRoute(
'/tokenize' , self.tokenize , response_model=A_ , response_class=A_ , methods=['POST'] , ),
APIRoute(
'/detokenize' , self.detokenize , response_model=A_ , response_class=A_ , methods=['POST'] , ),
APIRoute(
'/forward' , self.forward , response_model=A_ , response_class=A_ , methods=['POST'] , ),
] , timeout=600 , )
def _a ( self ) -> int:
run(self._app , host=self.host , port=self.port , workers=self.workers )
def _a ( self ) -> str:
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def _a ( self , A_ = Body(A_ , embed=A_ ) , A_ = Body(A_ , embed=A_ ) ) -> Optional[Any]:
try:
__UpperCamelCase =self._pipeline.tokenizer.tokenize(A_ )
if return_ids:
__UpperCamelCase =self._pipeline.tokenizer.convert_tokens_to_ids(A_ )
return ServeTokenizeResult(tokens=A_ , tokens_ids=A_ )
else:
return ServeTokenizeResult(tokens=A_ )
except Exception as e:
raise HTTPException(status_code=500 , detail={'model': '', 'error': str(A_ )} )
def _a ( self , A_ = Body(A_ , embed=A_ ) , A_ = Body(A_ , embed=A_ ) , A_ = Body(A_ , embed=A_ ) , ) -> Optional[int]:
try:
__UpperCamelCase =self._pipeline.tokenizer.decode(A_ , A_ , A_ )
return ServeDeTokenizeResult(model='' , text=A_ )
except Exception as e:
raise HTTPException(status_code=500 , detail={'model': '', 'error': str(A_ )} )
async def _a ( self , A_=Body(A_ , embed=A_ ) ) -> int:
# Check we don't have empty string
if len(A_ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
__UpperCamelCase =self._pipeline(A_ )
return ServeForwardResult(output=A_ )
except Exception as e:
raise HTTPException(500 , {'error': str(A_ )} )
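# Hypothetical client sketch for the /tokenize route registered above. The
# command line mirrors how this subcommand is wired into the CLI, and the JSON
# field names ('text_input', 'return_ids') are assumptions, since the route's
# parameter names are obfuscated in this listing:
#
#     transformers-cli serve --task fill-mask --model bert-base-uncased --port 8888
#
import requests  # not a dependency of the module above

def tokenize_remotely(text, host='localhost', port=8888):
    response = requests.post(
        f'http://{host}:{port}/tokenize',
        json={'text_input': text, 'return_ids': True},
    )
    response.raise_for_status()
    return response.json()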
| 711 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_A = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : bool = field(default=A_ , metadata={"help": "Whether to use SortishSampler or not."} )
UpperCAmelCase__ : bool = field(
default=A_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=A_ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def _a ( self ) -> Dict:
        d =super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] =v.to_dict()
return d
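# A minimal usage sketch, assuming the dataclass above corresponds to
# Seq2SeqTrainingArguments in the public API: generation-specific fields sit
# alongside the regular TrainingArguments fields and survive to_dict().
from transformers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir='out',            # required field of TrainingArguments
    predict_with_generate=True,  # compute generative metrics with generate()
    generation_max_length=128,
    generation_num_beams=4,
)
serialized = args.to_dict()  # nested GenerationConfig values become plain dicts
assert serialized['generation_num_beams'] == 4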
| 682 | 0 |
__author__ = 'Alexander Joslin'
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm( equation : str ) -> int:
    operators ={'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack =Stack()
    operator_stack =Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1: push single-digit operands onto the operand stack
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i )
        elif i == ")":
            # RULE 4: on ')', apply the top operator to the two top operands
            opr =operator_stack.peek()
            operator_stack.pop()
            num_a =operand_stack.peek()
            operand_stack.pop()
            num_b =operand_stack.peek()
            operand_stack.pop()
            total =operators[opr](num_b , num_a )
            operand_stack.push(total )
    # RULE 5: the remaining operand is the result
    return operand_stack.peek()
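# Worked trace of the evaluation above on '(2 + 3)' (note the character-by-
# character loop only handles single-digit operands):
#   '2' -> operand stack [2]
#   '+' -> operator stack ['+']
#   '3' -> operand stack [2, 3]
#   ')' -> pop '+', pop 3 then 2, push 2 + 3 -> operand stack [5]
assert dijkstras_two_stack_algorithm('(2 + 3)') == 5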
if __name__ == "__main__":
    equation = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 712 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_A = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
    'Salesforce/blip-vqa-capfilt-large': (
        'https://huggingface.co/Salesforce/blip-vqa-capfilt-large/resolve/main/config.json'
    ),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Dict = "blip_text_model"
def __init__( self , A_=30524 , A_=768 , A_=768 , A_=3072 , A_=768 , A_=12 , A_=8 , A_=512 , A_="gelu" , A_=1E-12 , A_=0.0 , A_=0.0 , A_=0.02 , A_=30522 , A_=2 , A_=0 , A_=102 , A_=True , A_=True , **A_ , ) -> Optional[int]:
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , sep_token_id=A_ , **A_ , )
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =encoder_hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =is_decoder
__UpperCamelCase =use_cache
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "blip_vision_model"
def __init__( self , A_=768 , A_=3072 , A_=512 , A_=12 , A_=12 , A_=384 , A_=16 , A_="gelu" , A_=1E-5 , A_=0.0 , A_=1E-10 , **A_ , ) -> Optional[Any]:
super().__init__(**A_ )
__UpperCamelCase =hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =patch_size
__UpperCamelCase =image_size
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_dropout
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : int = "blip"
UpperCAmelCase__ : Optional[int] = True
def __init__( self , A_=None , A_=None , A_=512 , A_=2.6592 , A_=256 , **A_ , ) -> Union[str, Any]:
super().__init__(**A_ )
if text_config is None:
__UpperCamelCase ={}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
if vision_config is None:
__UpperCamelCase ={}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
__UpperCamelCase =BlipTextConfig(**A_ )
__UpperCamelCase =BlipVisionConfig(**A_ )
__UpperCamelCase =self.vision_config.hidden_size
__UpperCamelCase =projection_dim
__UpperCamelCase =logit_scale_init_value
__UpperCamelCase =1.0
__UpperCamelCase =0.02
__UpperCamelCase =image_text_hidden_size
@classmethod
def _a ( cls , A_ , A_ , **A_ ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =copy.deepcopy(self.__dict__ )
__UpperCamelCase =self.text_config.to_dict()
__UpperCamelCase =self.vision_config.to_dict()
__UpperCamelCase =self.__class__.model_type
return output
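# A construction sketch for the config classes above (BlipTextConfig,
# BlipVisionConfig and the composite BlipConfig in the public API; the
# classmethod that takes two sub-configs corresponds to
# from_text_vision_configs there).
from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig

text_cfg = BlipTextConfig(hidden_size=768, num_hidden_layers=12)
vision_cfg = BlipVisionConfig(hidden_size=768, patch_size=16)
blip_cfg = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg)
serialized = blip_cfg.to_dict()  # sub-configs serialize to nested dicts
assert serialized['model_type'] == 'blip'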
| 682 | 0 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=30 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=3 , A_=0.6 , A_=None , ) -> Any:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =is_training
__UpperCamelCase =use_labels
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =mask_ratio
__UpperCamelCase =scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__UpperCamelCase =(image_size // patch_size) ** 2
__UpperCamelCase =int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def _a ( self ) -> int:
__UpperCamelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =self.get_config()
return config, pixel_values, labels
def _a ( self ) -> Dict:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def _a ( self , A_ , A_ , A_ ) -> Union[str, Any]:
__UpperCamelCase =ViTMAEModel(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ ) -> int:
__UpperCamelCase =ViTMAEForPreTraining(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ )
__UpperCamelCase =(self.image_size // self.patch_size) ** 2
__UpperCamelCase =self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__UpperCamelCase =1
__UpperCamelCase =ViTMAEForPreTraining(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCamelCase =model(A_ )
__UpperCamelCase =self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def _a ( self ) -> Any:
        config_and_inputs =self.prepare_config_and_inputs()
        config , pixel_values , labels =config_and_inputs
        inputs_dict ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : str = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
UpperCAmelCase__ : List[Any] = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Union[str, Any] = False
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =ViTMAEModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def _a ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def _a ( self ) -> Dict:
pass
def _a ( self ) -> Dict:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def _a ( self ) -> str:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase =[*signature.parameters.keys()]
__UpperCamelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A_ )
def _a ( self , A_ , A_ , A_ ) -> str:
# make masks reproducible
np.random.seed(2 )
__UpperCamelCase =int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
__UpperCamelCase =np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__UpperCamelCase =torch.from_numpy(A_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__UpperCamelCase =pt_noise
super().check_pt_tf_models(A_ , A_ , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
model.to(A_ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__UpperCamelCase =model(**self._prepare_for_class(A_ , A_ ) )
__UpperCamelCase =outputs[0].cpu().numpy()
__UpperCamelCase =0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A_ )
__UpperCamelCase =model_class.from_pretrained(A_ )
model.to(A_ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__UpperCamelCase =model(**self._prepare_for_class(A_ , A_ ) )
# Make sure we don't have nans
__UpperCamelCase =after_outputs[0].cpu().numpy()
__UpperCamelCase =0
__UpperCamelCase =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A_ , 1E-5 )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def _a ( self ) -> Dict:
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def _a ( self ) -> Union[str, Any]:
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def _a ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def _a ( self ) -> List[Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _a ( self ) -> str:
pass
@slow
def _a ( self ) -> Any:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =ViTMAEModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _UpperCAmelCase ( ):
__UpperCamelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ) -> Optional[Any]:
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def _a ( self ) -> Any:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
__UpperCamelCase =ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(A_ )
__UpperCamelCase =self.default_image_processor
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__UpperCamelCase =ViTMAEConfig()
__UpperCamelCase =int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__UpperCamelCase =np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
__UpperCamelCase =model(**A_ , noise=torch.from_numpy(A_ ).to(device=A_ ) )
# verify the logits
__UpperCamelCase =torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , A_ )
__UpperCamelCase =torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(A_ ) , atol=1E-4 ) )
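# A hedged reconstruction sketch matching the integration test above: ViTMAE
# predicts per-patch pixel values for the masked patches. Assumes network
# access to the public facebook/vit-mae-base checkpoint.
import torch
from transformers import ViTImageProcessor, ViTMAEForPreTraining

def reconstruct_patches(image):
    processor = ViTImageProcessor.from_pretrained('facebook/vit-mae-base')
    model = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base')
    inputs = processor(images=image, return_tensors='pt')
    with torch.no_grad():
        outputs = model(**inputs)
    # logits: (batch, num_patches, patch_size**2 * channels) = (1, 196, 768)
    return outputs.logits, outputs.mask  # mask marks the hidden patches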
| 713 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = RoCBertTokenizer
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : int = filter_non_english
def _a ( self ) -> Optional[Any]:
super().setUp()
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
__UpperCamelCase ={}
__UpperCamelCase ={}
for i, value in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =i
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(A_ , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Any:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__UpperCamelCase ={}
for i, token in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =RoCBertWordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _a ( self ) -> Dict:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _a ( self ) -> Tuple:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _a ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
__UpperCamelCase =self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _a ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
__UpperCamelCase =tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
__UpperCamelCase =tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False
__UpperCamelCase =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _a ( self ) -> List[str]:
__UpperCamelCase =['的', '人', '有']
__UpperCamelCase =''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =True
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =False
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCamelCase =[
f'##{token}' if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
@slow
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.encode('你好' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode('你是谁' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase ='你好,你是谁'
__UpperCamelCase =tokenizer.tokenize(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_shape_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_pronunciation_ids(A_ )
__UpperCamelCase =tokenizer.prepare_for_model(
A_ , A_ , A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode_plus(A_ , add_special_tokens=A_ )
self.assertEqual(A_ , A_ )
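# A minimal encode sketch for the three-channel tokenizer tested above:
# RoCBert pairs every token id with a shape id and a pronunciation id.
# Assumes the public weiweishi/roc-bert-base-zh checkpoint is reachable.
from transformers import RoCBertTokenizer

tok = RoCBertTokenizer.from_pretrained('weiweishi/roc-bert-base-zh')
enc = tok('你好')
# the three id sequences are aligned one-to-one
assert len(enc['input_ids']) == len(enc['input_shape_ids'])
assert len(enc['input_ids']) == len(enc['input_pronunciation_ids'])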
| 682 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = (UniPCMultistepScheduler,)
UpperCAmelCase__ : Tuple = (("num_inference_steps", 2_5),)
def _a ( self , **A_ ) -> Dict:
__UpperCamelCase ={
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**A_ )
return config
def _a ( self , A_=0 , **A_ ) -> str:
__UpperCamelCase =dict(self.forward_default_kwargs )
__UpperCamelCase =kwargs.pop('num_inference_steps' , A_ )
__UpperCamelCase =self.dummy_sample
__UpperCamelCase =0.1 * sample
__UpperCamelCase =[residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__UpperCamelCase =self.get_scheduler_config(**A_ )
__UpperCamelCase =scheduler_class(**A_ )
scheduler.set_timesteps(A_ )
# copy over dummy past residuals
__UpperCamelCase =dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A_ )
__UpperCamelCase =scheduler_class.from_pretrained(A_ )
new_scheduler.set_timesteps(A_ )
# copy over dummy past residuals
__UpperCamelCase =dummy_past_residuals[: new_scheduler.config.solver_order]
__UpperCamelCase , __UpperCamelCase =sample, sample
for t in range(A_ , time_step + scheduler.config.solver_order + 1 ):
__UpperCamelCase =scheduler.step(A_ , A_ , A_ , **A_ ).prev_sample
__UpperCamelCase =new_scheduler.step(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _a ( self , A_=0 , **A_ ) -> Union[str, Any]:
__UpperCamelCase =dict(self.forward_default_kwargs )
__UpperCamelCase =kwargs.pop('num_inference_steps' , A_ )
__UpperCamelCase =self.dummy_sample
__UpperCamelCase =0.1 * sample
__UpperCamelCase =[residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__UpperCamelCase =self.get_scheduler_config()
__UpperCamelCase =scheduler_class(**A_ )
scheduler.set_timesteps(A_ )
# copy over dummy past residuals (must be after setting timesteps)
__UpperCamelCase =dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A_ )
__UpperCamelCase =scheduler_class.from_pretrained(A_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(A_ )
# copy over dummy past residual (must be after setting timesteps)
__UpperCamelCase =dummy_past_residuals[: new_scheduler.config.solver_order]
__UpperCamelCase =scheduler.step(A_ , A_ , A_ , **A_ ).prev_sample
__UpperCamelCase =new_scheduler.step(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _a ( self , A_=None , **A_ ) -> Union[str, Any]:
if scheduler is None:
__UpperCamelCase =self.scheduler_classes[0]
__UpperCamelCase =self.get_scheduler_config(**A_ )
__UpperCamelCase =scheduler_class(**A_ )
__UpperCamelCase =self.scheduler_classes[0]
__UpperCamelCase =self.get_scheduler_config(**A_ )
__UpperCamelCase =scheduler_class(**A_ )
__UpperCamelCase =10
__UpperCamelCase =self.dummy_model()
__UpperCamelCase =self.dummy_sample_deter
scheduler.set_timesteps(A_ )
for i, t in enumerate(scheduler.timesteps ):
__UpperCamelCase =model(A_ , A_ )
__UpperCamelCase =scheduler.step(A_ , A_ , A_ ).prev_sample
return sample
def _a ( self ) -> int:
__UpperCamelCase =dict(self.forward_default_kwargs )
__UpperCamelCase =kwargs.pop('num_inference_steps' , A_ )
for scheduler_class in self.scheduler_classes:
__UpperCamelCase =self.get_scheduler_config()
__UpperCamelCase =scheduler_class(**A_ )
__UpperCamelCase =self.dummy_sample
__UpperCamelCase =0.1 * sample
if num_inference_steps is not None and hasattr(A_ , 'set_timesteps' ):
scheduler.set_timesteps(A_ )
elif num_inference_steps is not None and not hasattr(A_ , 'set_timesteps' ):
__UpperCamelCase =num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__UpperCamelCase =[residual + 0.2, residual + 0.15, residual + 0.10]
__UpperCamelCase =dummy_past_residuals[: scheduler.config.solver_order]
__UpperCamelCase =scheduler.timesteps[5]
__UpperCamelCase =scheduler.timesteps[6]
__UpperCamelCase =scheduler.step(A_ , A_ , A_ , **A_ ).prev_sample
__UpperCamelCase =scheduler.step(A_ , A_ , A_ , **A_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _a ( self ) -> List[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
__UpperCamelCase =UniPCMultistepScheduler(**self.get_scheduler_config() )
__UpperCamelCase =self.full_loop(scheduler=A_ )
__UpperCamelCase =torch.mean(torch.abs(A_ ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
__UpperCamelCase =DPMSolverSinglestepScheduler.from_config(scheduler.config )
__UpperCamelCase =DEISMultistepScheduler.from_config(scheduler.config )
__UpperCamelCase =DPMSolverMultistepScheduler.from_config(scheduler.config )
__UpperCamelCase =UniPCMultistepScheduler.from_config(scheduler.config )
__UpperCamelCase =self.full_loop(scheduler=A_ )
__UpperCamelCase =torch.mean(torch.abs(A_ ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def _a ( self ) -> Dict:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=A_ )
def _a ( self ) -> List[str]:
self.check_over_configs(thresholding=A_ )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=A_ , prediction_type=A_ , sample_max_value=A_ , solver_order=A_ , solver_type=A_ , )
def _a ( self ) -> Optional[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A_ )
def _a ( self ) -> Optional[int]:
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=A_ , solver_type=A_ , prediction_type=A_ , )
__UpperCamelCase =self.full_loop(
solver_order=A_ , solver_type=A_ , prediction_type=A_ , )
assert not torch.isnan(A_ ).any(), "Samples have nan numbers"
def _a ( self ) -> str:
self.check_over_configs(lower_order_final=A_ )
self.check_over_configs(lower_order_final=A_ )
def _a ( self ) -> Any:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=A_ , time_step=0 )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.full_loop()
__UpperCamelCase =torch.mean(torch.abs(A_ ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def _a ( self ) -> str:
__UpperCamelCase =self.full_loop(prediction_type='v_prediction' )
__UpperCamelCase =torch.mean(torch.abs(A_ ) )
assert abs(result_mean.item() - 0.1014 ) < 1E-3
def _a ( self ) -> Dict:
__UpperCamelCase =self.scheduler_classes[0]
__UpperCamelCase =self.get_scheduler_config(thresholding=A_ , dynamic_thresholding_ratio=0 )
__UpperCamelCase =scheduler_class(**A_ )
__UpperCamelCase =10
__UpperCamelCase =self.dummy_model()
__UpperCamelCase =self.dummy_sample_deter.half()
scheduler.set_timesteps(A_ )
for i, t in enumerate(scheduler.timesteps ):
__UpperCamelCase =model(A_ , A_ )
__UpperCamelCase =scheduler.step(A_ , A_ , A_ ).prev_sample
        assert sample.dtype == torch.float16
def _a ( self , **A_ ) -> str:
for scheduler_class in self.scheduler_classes:
__UpperCamelCase =self.get_scheduler_config(**A_ )
__UpperCamelCase =scheduler_class(**A_ )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
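# A minimal sketch (assuming the `diffusers` package is installed) of the
# config round-trip the tests above rely on: these multistep schedulers share
# one config format, so each can be rebuilt from another's `.config` intact.
from diffusers import DPMSolverMultistepScheduler, UniPCMultistepScheduler

unipc = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2)
dpm = DPMSolverMultistepScheduler.from_config(unipc.config)
rebuilt = UniPCMultistepScheduler.from_config(dpm.config)
assert rebuilt.config.num_train_timesteps == 1000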
| 714 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_A = random.Random()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=1.0 , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ):
if rng is None:
__UpperCamelCase =global_rng
__UpperCamelCase =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=10 , A_=160 , A_=8 , A_=0.0 , A_=4000 , A_=False , A_=True , ) -> Optional[Any]:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =min_seq_length
__UpperCamelCase =max_seq_length
__UpperCamelCase =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCamelCase =padding_value
__UpperCamelCase =sampling_rate
__UpperCamelCase =return_attention_mask
__UpperCamelCase =do_normalize
__UpperCamelCase =feature_size
__UpperCamelCase =chunk_length
__UpperCamelCase =hop_length
def _a ( self ) -> int:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _a ( self , A_=False , A_=False ) -> Any:
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
__UpperCamelCase =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__UpperCamelCase =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__UpperCamelCase =[np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = WhisperFeatureExtractor if is_speech_available() else None
def _a ( self ) -> Optional[int]:
__UpperCamelCase =WhisperFeatureExtractionTester(self )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
__UpperCamelCase =self.feature_extraction_class.from_pretrained(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =os.path.join(A_ , 'feat_extract.json' )
feat_extract_first.to_json_file(A_ )
__UpperCamelCase =self.feature_extraction_class.from_json_file(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> Tuple:
# Tests that all call wrap to encode_plus and batch_encode_plus
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
__UpperCamelCase =feature_extractor(A_ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__UpperCamelCase =feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test batched
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__UpperCamelCase =[floats_list((1, x) )[0] for x in (800, 800, 800)]
__UpperCamelCase =np.asarray(A_ )
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test truncation required
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
__UpperCamelCase =[x[: feature_extractor.n_samples] for x in speech_inputs]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs_truncated]
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
def _a ( self ) -> Dict:
import torch
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __UpperCamelCase =np.random.rand(100 , 32 ).astype(np.float64 )
__UpperCamelCase =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def _a ( self , A_ ) -> Optional[int]:
__UpperCamelCase =load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
__UpperCamelCase =ds.sort('id' ).select(range(A_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def _a ( self ) -> Optional[int]:
# fmt: off
__UpperCamelCase =torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
__UpperCamelCase =self._load_datasamples(1 )
__UpperCamelCase =WhisperFeatureExtractor()
__UpperCamelCase =feature_extractor(A_ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1E-4 ) )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase =self._load_datasamples(1 )[0]
__UpperCamelCase =((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
__UpperCamelCase =feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0]
self.assertTrue(np.all(np.mean(A_ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1E-3 ) )
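# A standalone numpy sketch of the zero-mean / unit-variance normalization the
# last test above checks on a badly scaled signal: after normalization the
# sample mean is ~0 and the sample variance ~1, regardless of the input scale.
import numpy as np

def zero_mean_unit_var_norm(x, eps=1e-7):
    return (x - x.mean()) / np.sqrt(x.var() + eps)

audio = np.random.rand(16_000) * 65_535.0  # rescaled to [0, 65535] like the test
normed = zero_mean_unit_var_norm(audio)
assert abs(normed.mean()) < 1e-3 and abs(normed.var() - 1.0) < 1e-3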
| 682 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_A = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
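# A tiny self-contained sketch of the lazy-import pattern used above: attribute
# access on the module object triggers the real import, so importing the
# package stays cheap and optional backends load only when actually used.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)

lazy = LazyModule("demo", {"json": ["dumps"]})
assert lazy.dumps({"a": 1}) == '{"a": 1}'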
| 715 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , ) -> List[str]:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =2
__UpperCamelCase =99
__UpperCamelCase =0
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase ='last'
__UpperCamelCase =True
__UpperCamelCase =None
__UpperCamelCase =0
def _a ( self ) -> List[Any]:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase =None
if self.use_input_lengths:
__UpperCamelCase =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Any:
__UpperCamelCase =TFFlaubertModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertWithLMHeadModel(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertForQuestionAnsweringSimple(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =TFFlaubertForSequenceClassification(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFFlaubertForTokenClassification(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFFlaubertForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
        (
            __UpperCamelCase,
            __UpperCamelCase,
            __UpperCamelCase,
            __UpperCamelCase,
            __UpperCamelCase,
            __UpperCamelCase,
            __UpperCamelCase,
            __UpperCamelCase,
            __UpperCamelCase,
        ) =config_and_inputs
__UpperCamelCase ={
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCAmelCase__ : Any = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[int] = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _a ( self ) -> Dict:
__UpperCamelCase =TFFlaubertModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , emb_dim=37 )
def _a ( self ) -> Dict:
self.config_tester.run_common_tests()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A_ )
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A_ )
@slow
def _a ( self ) -> Optional[int]:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =TFFlaubertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> int:
__UpperCamelCase =TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase =tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__UpperCamelCase =model(A_ )[0]
__UpperCamelCase =tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice.
__UpperCamelCase =tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
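# A numpy-only sketch of the frozen-slice regression check used above: instead
# of pinning a full output tensor, tests pin a small corner of it and compare
# with an absolute tolerance so tiny numerical drift does not break the test.
import numpy as np

output = np.array([[[-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052]]])
expected_slice = np.array([[[-1.87688, -1.56656, 0.27072],
                            [-1.69200, -0.58735, 1.93296],
                            [-2.95640, -1.69938, 1.79721]]])
assert np.allclose(output[:, :3, :3], expected_slice, atol=1e-4)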
| 682 | 0 |
'''simple docstring'''
import requests
_A = '' # <-- Put your OpenWeatherMap appid here!
_A = 'https://api.openweathermap.org/data/2.5/'
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "Chicago" , SCREAMING_SNAKE_CASE__ : str = APPID ):
return requests.get(URL_BASE + 'weather' , params=locals() ).json()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "Kolkata, India" , SCREAMING_SNAKE_CASE__ : str = APPID ):
return requests.get(URL_BASE + 'forecast' , params=locals() ).json()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float = 55.68 , SCREAMING_SNAKE_CASE__ : float = 12.57 , SCREAMING_SNAKE_CASE__ : str = APPID ):
return requests.get(URL_BASE + 'onecall' , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
_A = input('Enter a location:').strip()
if location:
pprint(current_weather(location))
else:
break
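# The endpoints above exploit `params=locals()`: the function's own arguments
# become the URL query string. A hypothetical explicit equivalent (the `q` and
# `appid` query-parameter names are assumptions about the OpenWeatherMap API):
import requests

def current_weather_explicit(location="Chicago", appid=""):
    return requests.get(
        "https://api.openweathermap.org/data/2.5/weather",
        params={"q": location, "appid": appid},
        timeout=10,
    ).json()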
| 716 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
# ===== initialization =====
__UpperCamelCase =Mock()
__UpperCamelCase =conn, Mock()
__UpperCamelCase =iter([1, None] )
__UpperCamelCase =lambda SCREAMING_SNAKE_CASE__ : next(SCREAMING_SNAKE_CASE__ )
# ===== invoke =====
send_file(filename='mytext.txt' , testing=SCREAMING_SNAKE_CASE__ )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
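# A minimal self-contained sketch of the patching pattern used above: `patch`
# swaps a real object for a Mock inside the context, and the Mock records every
# call so the test can assert on usage afterwards.
from unittest.mock import patch

def greet():
    print("hello")

with patch("builtins.print") as fake_print:
    greet()
fake_print.assert_called_once_with("hello")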
| 682 | 0 |
from __future__ import annotations
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_=None ) -> Any:
__UpperCamelCase =data
__UpperCamelCase =None
def __repr__( self ) -> Tuple:
__UpperCamelCase =[]
__UpperCamelCase =self
while temp:
string_rep.append(f'{temp.data}' )
__UpperCamelCase =temp.next
return "->".join(A_ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list ):
if not elements_list:
raise Exception('The Elements List is empty' )
__UpperCamelCase =__UpperCamelCase =Node(elements_list[0] )
for i in range(1 , len(SCREAMING_SNAKE_CASE__ ) ):
__UpperCamelCase =Node(elements_list[i] )
__UpperCamelCase =current.next
return head
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Node ):
if head_node is not None and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
print_reverse(head_node.next )
print(head_node.data )
def _UpperCAmelCase ( ):
from doctest import testmod
testmod()
__UpperCamelCase =make_linked_list([14, 52, 14, 12, 43] )
print('Linked List:' )
print(SCREAMING_SNAKE_CASE__ )
print('Elements in Reverse:' )
print_reverse(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
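# An iterative alternative to the recursive print_reverse above (a sketch using
# the same node shape): collect the data into a list and print it back to
# front, which avoids Python's recursion limit on very long lists.
def print_reverse_iterative(head_node):
    values = []
    while head_node is not None:
        values.append(head_node.data)
        head_node = head_node.next
    for value in reversed(values):
        print(value)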
| 717 |
import math
from collections.abc import Callable
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Callable[[float], float] , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ):
__UpperCamelCase =xa
__UpperCamelCase =xa
while True:
if x_n == x_na or function(SCREAMING_SNAKE_CASE__ ) == function(SCREAMING_SNAKE_CASE__ ):
raise ZeroDivisionError('float division by zero, could not find root' )
__UpperCamelCase =x_na - (
function(SCREAMING_SNAKE_CASE__ ) / ((function(SCREAMING_SNAKE_CASE__ ) - function(SCREAMING_SNAKE_CASE__ )) / (x_na - x_n))
)
if abs(x_na - x_na ) < 10**-5:
return x_na
__UpperCamelCase =x_na
__UpperCamelCase =x_na
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float ):
return math.pow(SCREAMING_SNAKE_CASE__ , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
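# A readable restatement of the secant iteration above (a sketch): repeat
# x0, x1 = x1, x1 - f(x1) * (x1 - x0) / (f(x1) - f(x0)) until successive
# iterates agree to within the tolerance, here on the same f and start points.
def secant(f, x0, x1, tol=1e-5):
    while abs(x1 - x0) >= tol:
        if f(x1) == f(x0):
            raise ZeroDivisionError("flat secant, could not find root")
        x0, x1 = x1, x1 - f(x1) * (x1 - x0) / (f(x1) - f(x0))
    return x1

assert abs(secant(lambda x: x**3 - 2 * x - 5, 3, 3.5) - 2.0945515) < 1e-4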
| 682 | 0 |
from __future__ import annotations
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =[]
create_all_state(1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , [] , SCREAMING_SNAKE_CASE__ )
return result
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : list[list[int]] , ):
if level == 0:
total_list.append(current_list[:] )
return
for i in range(SCREAMING_SNAKE_CASE__ , total_number - level + 2 ):
current_list.append(SCREAMING_SNAKE_CASE__ )
create_all_state(i + 1 , SCREAMING_SNAKE_CASE__ , level - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
current_list.pop()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list[list[int]] ):
for i in total_list:
print(*SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_A = 4
_A = 2
_A = generate_all_combinations(n, k)
print_all_state(total_list)
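# For cross-checking the backtracking above: the standard library produces the
# same k-combinations of 1..n in the same lexicographic order (a sketch).
from itertools import combinations

def all_combinations(n, k):
    return [list(c) for c in combinations(range(1, n + 1), k)]

assert all_combinations(4, 2) == [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]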
| 718 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
_A = logging.getLogger(__name__)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self ) -> int:
__UpperCamelCase =False
def _a ( self , A_ , A_ , A_ , A_ ) -> List[Any]:
if not self.initialized:
__UpperCamelCase =RagRetriever(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =True
def _a ( self ) -> Optional[Any]:
self.retriever.index.init_index()
def _a ( self , A_ , A_ ) -> Dict:
__UpperCamelCase , __UpperCamelCase =self.retriever._main_retrieve(A_ , A_ )
return doc_ids, retrieved_doc_embeds
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ , A_=None ) -> Dict:
if index is not None and index.is_initialized() and len(A_ ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(A_ , A_ , A_ , A_ )
for worker in self.retrieval_workers
] )
def _a ( self ) -> Union[str, Any]:
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _a ( self , A_ , A_ ) -> Optional[int]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__UpperCamelCase =self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__UpperCamelCase , __UpperCamelCase =ray.get(random_worker.retrieve.remote(A_ , A_ ) )
else:
__UpperCamelCase , __UpperCamelCase =self._main_retrieve(A_ , A_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A_ )
@classmethod
def _a ( cls , A_ , A_=None , **A_ ) -> List[str]:
return super(A_ , cls ).get_tokenizers(A_ , A_ , **A_ )
@classmethod
def _a ( cls , A_ , A_ , A_=None , **A_ ) -> str:
__UpperCamelCase =kwargs.pop('config' , A_ ) or RagConfig.from_pretrained(A_ , **A_ )
__UpperCamelCase =RagTokenizer.from_pretrained(A_ , config=A_ )
__UpperCamelCase =rag_tokenizer.question_encoder
__UpperCamelCase =rag_tokenizer.generator
if indexed_dataset is not None:
__UpperCamelCase ='custom'
__UpperCamelCase =CustomHFIndex(config.retrieval_vector_size , A_ )
else:
__UpperCamelCase =cls._build_index(A_ )
return cls(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , retrieval_workers=A_ , index=A_ , )
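# A Ray-free sketch of the dispatch logic above: with workers available, each
# request goes to one picked uniformly at random; with none, fall back to
# handling the request in the current process.
import random

def pick_handler(workers, local_handler):
    if workers:
        return workers[random.randint(0, len(workers) - 1)]
    return local_handler

workers = ["worker-0", "worker-1", "worker-2"]  # stand-ins for Ray actor handles
assert pick_handler(workers, "local") in workers
assert pick_handler([], "local") == "local"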
| 682 | 0 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_A = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
_A = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] ):
    __UpperCamelCase =numpy.dtype(numpy.uint32 ).newbyteorder('>' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=SCREAMING_SNAKE_CASE__ )[0]
@deprecated(SCREAMING_SNAKE_CASE__ , 'Please use tf.data to implement this functionality.' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
print('Extracting' , f.name )
with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE__ ) as bytestream:
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
if magic != 20_51:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) )
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =bytestream.read(rows * cols * num_images )
__UpperCamelCase =numpy.frombuffer(SCREAMING_SNAKE_CASE__ , dtype=numpy.uinta )
__UpperCamelCase =data.reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
return data
@deprecated(SCREAMING_SNAKE_CASE__ , 'Please use tf.one_hot on tensors.' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =labels_dense.shape[0]
__UpperCamelCase =numpy.arange(SCREAMING_SNAKE_CASE__ ) * num_classes
__UpperCamelCase =numpy.zeros((num_labels, num_classes) )
__UpperCamelCase =1
return labels_one_hot
@deprecated(SCREAMING_SNAKE_CASE__ , 'Please use tf.data to implement this functionality.' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=10 ):
print('Extracting' , f.name )
with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE__ ) as bytestream:
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
if magic != 20_49:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) )
__UpperCamelCase =_readaa(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =bytestream.read(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =numpy.frombuffer(SCREAMING_SNAKE_CASE__ , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return labels
class UpperCAmelCase__ :
"""simple docstring"""
@deprecated(
A_ , 'Please use alternatives such as official/mnist/_DataSet.py'
' from tensorflow/models.' , )
def __init__( self , A_ , A_ , A_=False , A_=False , A_=dtypes.floataa , A_=True , A_=None , ) -> List[str]:
__UpperCamelCase , __UpperCamelCase =random_seed.get_seed(A_ )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
__UpperCamelCase =dtypes.as_dtype(A_ ).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )
if fake_data:
__UpperCamelCase =10000
__UpperCamelCase =one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'images.shape: {images.shape} labels.shape: {labels.shape}'
__UpperCamelCase =images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
__UpperCamelCase =images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
__UpperCamelCase =images.astype(numpy.floataa )
__UpperCamelCase =numpy.multiply(A_ , 1.0 / 255.0 )
__UpperCamelCase =images
__UpperCamelCase =labels
__UpperCamelCase =0
__UpperCamelCase =0
@property
def _a ( self ) -> Tuple:
return self._images
@property
def _a ( self ) -> Dict:
return self._labels
@property
def _a ( self ) -> List[str]:
return self._num_examples
@property
def _a ( self ) -> Dict:
return self._epochs_completed
def _a ( self , A_ , A_=False , A_=True ) -> Any:
if fake_data:
__UpperCamelCase =[1] * 784
__UpperCamelCase =[1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(A_ )],
[fake_label for _ in range(A_ )],
)
__UpperCamelCase =self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
__UpperCamelCase =numpy.arange(self._num_examples )
numpy.random.shuffle(A_ )
__UpperCamelCase =self.images[perma]
__UpperCamelCase =self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
__UpperCamelCase =self._num_examples - start
__UpperCamelCase =self._images[start : self._num_examples]
__UpperCamelCase =self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
__UpperCamelCase =numpy.arange(self._num_examples )
numpy.random.shuffle(A_ )
__UpperCamelCase =self.images[perm]
__UpperCamelCase =self.labels[perm]
# Start next epoch
__UpperCamelCase =0
__UpperCamelCase =batch_size - rest_num_examples
__UpperCamelCase =self._index_in_epoch
__UpperCamelCase =self._images[start:end]
__UpperCamelCase =self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
__UpperCamelCase =self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(SCREAMING_SNAKE_CASE__ , 'Please write your own downloading logic.' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
if not gfile.Exists(SCREAMING_SNAKE_CASE__ ):
gfile.MakeDirs(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not gfile.Exists(SCREAMING_SNAKE_CASE__ ):
urllib.request.urlretrieve(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # noqa: S310
with gfile.GFile(SCREAMING_SNAKE_CASE__ ) as f:
__UpperCamelCase =f.size()
print('Successfully downloaded' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'bytes.' )
return filepath
@deprecated(
SCREAMING_SNAKE_CASE__ , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : str=dtypes.floataa , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Tuple=50_00 , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ , seed=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =fake()
__UpperCamelCase =fake()
__UpperCamelCase =fake()
return _Datasets(train=SCREAMING_SNAKE_CASE__ , validation=SCREAMING_SNAKE_CASE__ , test=SCREAMING_SNAKE_CASE__ )
if not source_url: # empty string check
__UpperCamelCase =DEFAULT_SOURCE_URL
__UpperCamelCase ='train-images-idx3-ubyte.gz'
__UpperCamelCase ='train-labels-idx1-ubyte.gz'
__UpperCamelCase ='t10k-images-idx3-ubyte.gz'
__UpperCamelCase ='t10k-labels-idx1-ubyte.gz'
__UpperCamelCase =_maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + train_images_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f:
__UpperCamelCase =_extract_images(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + train_labels_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f:
__UpperCamelCase =_extract_labels(SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + test_images_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f:
__UpperCamelCase =_extract_images(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + test_labels_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , 'rb' ) as f:
__UpperCamelCase =_extract_labels(SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ )
if not 0 <= validation_size <= len(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =(
'Validation size should be between 0 and '
F'{len(SCREAMING_SNAKE_CASE__ )}. Received: {validation_size}.'
)
raise ValueError(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =train_images[:validation_size]
__UpperCamelCase =train_labels[:validation_size]
__UpperCamelCase =train_images[validation_size:]
__UpperCamelCase =train_labels[validation_size:]
__UpperCamelCase ={'dtype': dtype, 'reshape': reshape, 'seed': seed}
__UpperCamelCase =_DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
return _Datasets(train=SCREAMING_SNAKE_CASE__ , validation=SCREAMING_SNAKE_CASE__ , test=SCREAMING_SNAKE_CASE__ )
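# A standalone numpy sketch of the dense -> one-hot conversion that the helper
# above performs with index arithmetic: row i gets a 1 in column labels[i].
import numpy as np

def dense_to_one_hot(labels, num_classes):
    one_hot = np.zeros((labels.shape[0], num_classes), dtype=np.float32)
    one_hot[np.arange(labels.shape[0]), labels] = 1.0
    return one_hot

assert dense_to_one_hot(np.array([0, 2, 1]), 3).tolist() == [
    [1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]]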
| 719 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=64 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=[1, 16, 4, 4] , A_=None , ) -> Any:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =is_training
__UpperCamelCase =use_labels
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =scope
__UpperCamelCase =backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
__UpperCamelCase =(self.image_size // 32) ** 2
__UpperCamelCase =num_patches + 1
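        # Worked check of the stride-32 comment above (illustrative): with
        # image_size=64 the feature map has (64 // 32) ** 2 = 4 patches, and
        # adding the [CLS] token gives seq_length = 5.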
def _a ( self ) -> str:
__UpperCamelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =self.get_config()
return config, pixel_values, labels
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ={
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=A_ , )
def _a ( self , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModel(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =self.type_sequence_label_size
__UpperCamelCase =ViTHybridForImageClassification(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =config_and_inputs
__UpperCamelCase ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def _a ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self ) -> List[str]:
pass
def _a ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def _a ( self ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase =[*signature.parameters.keys()]
__UpperCamelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =_config_zero_init(A_ )
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(config=A_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
__UpperCamelCase =[f'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def _a ( self ) -> int:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =ViTHybridModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _UpperCAmelCase ( ):
__UpperCamelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ) -> Union[str, Any]:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self ) -> str:
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
A_ )
__UpperCamelCase =self.default_image_processor
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
__UpperCamelCase =model(**A_ )
# verify the logits
__UpperCamelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
__UpperCamelCase =torch.tensor([-1.9090, -0.4993, -0.2389] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self ) -> Optional[int]:
__UpperCamelCase =ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' )
__UpperCamelCase =model(**A_ )
__UpperCamelCase =outputs.logits
# model predicts one of the 1000 ImageNet classes
__UpperCamelCase =logits.argmax(-1 ).item()
        self.assertEqual(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
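# A numpy sketch of the classification readout exercised above: the predicted
# class index is simply the argmax over the final logits row.
import numpy as np

logits = np.array([[0.1, 2.3, -0.4]])  # hypothetical 3-class output
predicted_class_idx = int(logits.argmax(-1)[0])
assert predicted_class_idx == 1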
| 682 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = '▁'
_A = {'vocab_file': 'spiece.model'}
_A = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
_A = {
'google/reformer-crime-and-punishment': 52_4288,
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = VOCAB_FILES_NAMES
UpperCAmelCase__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[str] = ["input_ids", "attention_mask"]
def __init__( self , A_ , A_="</s>" , A_="<unk>" , A_=[] , A_ = None , **A_ , ) -> None:
__UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=A_ , unk_token=A_ , additional_special_tokens=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
__UpperCamelCase =vocab_file
__UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
@property
def _a ( self ) -> Union[str, Any]:
return self.sp_model.get_piece_size()
def _a ( self ) -> Dict[str, int]:
__UpperCamelCase ={self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> str:
__UpperCamelCase =self.__dict__.copy()
__UpperCamelCase =None
return state
def __setstate__( self , A_ ) -> Any:
__UpperCamelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase ={}
__UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self , A_ ) -> List[str]:
return self.sp_model.encode(A_ , out_type=A_ )
def _a ( self , A_ ) -> str:
return self.sp_model.piece_to_id(A_ )
def _a ( self , A_ ) -> Dict:
if index < self.sp_model.get_piece_size():
__UpperCamelCase =self.sp_model.IdToPiece(A_ )
return token
def _a ( self , A_ ) -> Tuple:
__UpperCamelCase =[]
__UpperCamelCase =''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
__UpperCamelCase =[]
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase =os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , 'wb' ) as fi:
__UpperCamelCase =self.sp_model.serialized_model_proto()
fi.write(A_ )
        return (out_vocab_file,)
| 720 |
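# A dependency-free sketch of the detokenization loop in the tokenizer sample
# above: special tokens pass through verbatim, while runs of ordinary pieces
# are joined and the '▁' word-boundary markers become spaces.
def pieces_to_string(tokens, special_tokens=("</s>", "<unk>")):
    out, current = "", []
    for token in tokens:
        if token in special_tokens:
            out += "".join(current).replace("▁", " ") + token
            current = []
        else:
            current.append(token)
    return (out + "".join(current).replace("▁", " ")).strip()

assert pieces_to_string(["▁Hel", "lo", "▁world", "</s>"]) == "Hello world</s>"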
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : LevitConfig , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : bool = True ):
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
__UpperCamelCase =timm.create_model('levit_128s' , pretrained=SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =timm.create_model('levit_128' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 1_92:
__UpperCamelCase =timm.create_model('levit_192' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 2_56:
__UpperCamelCase =timm.create_model('levit_256' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 3_84:
__UpperCamelCase =timm.create_model('levit_384' , pretrained=SCREAMING_SNAKE_CASE__ )
from_model.eval()
__UpperCamelCase =LevitForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE__ ).eval()
__UpperCamelCase =OrderedDict()
__UpperCamelCase =from_model.state_dict()
__UpperCamelCase =list(from_model.state_dict().keys() )
__UpperCamelCase =list(our_model.state_dict().keys() )
print(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__UpperCamelCase =weights[og_keys[i]]
our_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =torch.randn((2, 3, 2_24, 2_24) )
__UpperCamelCase =from_model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =our_model(SCREAMING_SNAKE_CASE__ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "The model logits don't match the original one."
__UpperCamelCase =name
print(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__UpperCamelCase =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = True ):
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =10_00
__UpperCamelCase =(1, num_labels)
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =num_labels
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
    __UpperCamelCase ={int(k): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
__UpperCamelCase =partial(SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase ={
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
__UpperCamelCase ={
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
args = parser.parse_args()
pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
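# Example invocation (hypothetical script name and paths; omit --model_name to convert
# every supported LeViT size):
#   python convert_levit_checkpoint.py --model_name levit-128S --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub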
| 682 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCAmelCase__ :
def __init__( self , A_ , ) -> List[str]:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =2
__UpperCamelCase =99
__UpperCamelCase =0
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase ='last'
__UpperCamelCase =True
__UpperCamelCase =None
__UpperCamelCase =0
def _a ( self ) -> List[Any]:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase =None
if self.use_input_lengths:
__UpperCamelCase =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Any:
__UpperCamelCase =TFFlaubertModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertWithLMHeadModel(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertForQuestionAnsweringSimple(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =TFFlaubertForSequenceClassification(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFFlaubertForTokenClassification(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFFlaubertForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'langs': token_type_ids,
            'lengths': input_lengths,
        }
        return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
UpperCAmelCase__ : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCAmelCase__ : Any = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[int] = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp( self ):
        self.model_tester = TFFlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
def _a ( self ) -> Dict:
self.config_tester.run_common_tests()
    def _a ( self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs )
    def _a ( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )
    def _a ( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )
    def _a ( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )
    def _a ( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs )
    def _a ( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs )
@slow
def _a ( self ) -> Optional[int]:
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
    def _a ( self ) -> int:
        model = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , )  # "J'aime flaubert !"
        output = model(input_ids )[0]
        expected_shape = tf.TensorShape((1, 8, 512) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.876_8773, -1.56_6555, 0.2707_2418],
                    [-1.692_0038, -0.587_3505, 1.932_9599],
                    [-2.956_3985, -1.699_3835, 1.797_2052],
                ]
            ] , dtype=tf.floataa , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
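    # Hedged note on the check above: integration tests pin a tiny slice of reference
    # activations recorded offline from a trusted run, then compare with a loose
    # absolute tolerance (atol=1E-4) so that benign cross-backend numerical drift
    # does not produce false failures.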
| 721 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        self.checkpoint = 'laion/clap-htsat-unfused'
        self.tmpdirname = tempfile.mkdtemp()
    def get_tokenizer( self , **kwargs ):
        return RobertaTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor( self , **kwargs ):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
def _a ( self ) -> str:
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = ClapProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
def _a ( self ) -> int:
        processor = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False , padding_value=1.0 )
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
def _a ( self ) -> str:
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        raw_speech = floats_list((3, 1000) )
        input_feat_extract = feature_extractor(raw_speech , return_tensors='np' )
        input_processor = processor(audios=raw_speech , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ) -> int:
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        input_str = 'This is a test string'
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self ) -> List[str]:
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_processor , decoded_tok )
def _a ( self ) -> Tuple:
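        # ClapProcessor exposes the tokenizer's input names followed by the feature
        # extractor's; slicing off the first two (tokenizer) entries should leave exactly
        # the feature-extractor names, which is what the assertion below verifies.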
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
| 682 | 0 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=2 , A_=3 , A_=4 , A_=2 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=36 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=6 , A_=6 , A_=3 , A_=4 , A_=None , A_=1000 , ) -> List[Any]:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =num_channels
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =is_training
__UpperCamelCase =use_input_mask
__UpperCamelCase =use_token_type_ids
__UpperCamelCase =use_labels
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =type_vocab_size
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =coordinate_size
__UpperCamelCase =shape_size
__UpperCamelCase =num_labels
__UpperCamelCase =num_choices
__UpperCamelCase =scope
__UpperCamelCase =range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__UpperCamelCase =text_seq_length
__UpperCamelCase =(image_size // patch_size) ** 2 + 1
__UpperCamelCase =self.text_seq_length + self.image_seq_length
def _a ( self ) -> List[Any]:
__UpperCamelCase =ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__UpperCamelCase =bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__UpperCamelCase =bbox[i, j, 3]
__UpperCamelCase =bbox[i, j, 1]
__UpperCamelCase =tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__UpperCamelCase =bbox[i, j, 2]
__UpperCamelCase =bbox[i, j, 0]
__UpperCamelCase =tmp_coordinate
__UpperCamelCase =tf.constant(A_ )
__UpperCamelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.text_seq_length] )
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__UpperCamelCase =LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ ) -> Union[str, Any]:
__UpperCamelCase =TFLayoutLMvaModel(config=A_ )
# text + image
__UpperCamelCase =model(A_ , pixel_values=A_ , training=A_ )
__UpperCamelCase =model(
A_ , bbox=A_ , pixel_values=A_ , attention_mask=A_ , token_type_ids=A_ , training=A_ , )
__UpperCamelCase =model(A_ , bbox=A_ , pixel_values=A_ , training=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__UpperCamelCase =model(A_ , training=A_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__UpperCamelCase =model({'pixel_values': pixel_values} , training=A_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> int:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFLayoutLMvaForSequenceClassification(config=A_ )
__UpperCamelCase =model(
A_ , bbox=A_ , pixel_values=A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , training=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> List[Any]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFLayoutLMvaForTokenClassification(config=A_ )
__UpperCamelCase =model(
A_ , bbox=A_ , pixel_values=A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , training=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Tuple:
__UpperCamelCase =2
__UpperCamelCase =TFLayoutLMvaForQuestionAnswering(config=A_ )
__UpperCamelCase =model(
A_ , bbox=A_ , pixel_values=A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , training=A_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : List[str] = (
{"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : int = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> Dict:
return True
def _a ( self , A_ , A_ , A_=False ) -> dict:
__UpperCamelCase =copy.deepcopy(A_ )
if model_class in get_values(A_ ):
__UpperCamelCase ={
k: tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(A_ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(A_ ):
__UpperCamelCase =tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(A_ ):
__UpperCamelCase =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
__UpperCamelCase =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(A_ ):
__UpperCamelCase =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(A_ ):
__UpperCamelCase =tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
    def setUp( self ):
        self.model_tester = TFLayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
def _a ( self ) -> int:
self.config_tester.run_common_tests()
def _a ( self ) -> Optional[int]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            if getattr(model , 'hf_compute_loss' , None ):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=True )[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                input_ids = prepared_for_class.pop('input_ids' )
                loss = model(input_ids , **prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                input_ids = prepared_for_class.pop('input_ids' )
                if "labels" in prepared_for_class:
                    labels = prepared_for_class['labels'].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class['labels'] = tf.convert_to_tensor(labels )
                        loss = model(input_ids , **prepared_for_class )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                loss = model(prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call ).parameters
                signature_names = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: 'input_ids'}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key )
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input )
                # Send to model
                loss = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def _a ( self ) -> Optional[int]:
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
def _a ( self ) -> List[Any]:
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
def _a ( self ) -> Optional[int]:
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
def _a ( self ) -> int:
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels )
def _a ( self ) -> Any:
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
@slow
def _a ( self ) -> str:
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
__UpperCamelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
def _a ( self ) -> List[Any]:
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='tf' ).pixel_values
        input_ids = tf.constant([[1, 2]] )
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
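# The per-coordinate swap loop in the model tester's prepare_config_and_inputs above can
# also be written vectorised; a hedged numpy sketch with the same semantics (helper name
# illustrative, not used by the tests):
def _legalize_bboxes_sketch(bbox ):
    bbox = np.asarray(bbox ).copy()
    x0 = np.minimum(bbox[..., 0] , bbox[..., 2] )
    x1 = np.maximum(bbox[..., 0] , bbox[..., 2] )
    y0 = np.minimum(bbox[..., 1] , bbox[..., 3] )
    y1 = np.maximum(bbox[..., 1] , bbox[..., 3] )
    return np.stack([x0, y0, x1, y1] , axis=-1 )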
| 700 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('test' )
    else:
        parser = argparse.ArgumentParser('Accelerate test command' )
    parser.add_argument(
        '--config_file' , default=None , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser
def test_command( args ):
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F'--config_file={args.config_file} {script_name}'
    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print('Test is a success! You are ready for your distributed training!' )
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
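# Illustrative CLI usage (the `accelerate` entry point wires this parser in as a subcommand):
#   accelerate test
#   accelerate test --config_file path/to/default_config.yaml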
| 682 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , ) -> List[Any]:
__UpperCamelCase =size if size is not None else {'height': 18, 'width': 18}
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =num_channels
__UpperCamelCase =image_size
__UpperCamelCase =min_resolution
__UpperCamelCase =max_resolution
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =apply_ocr
    def prepare_image_processor_dict( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp( self ):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
@property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> List[Any]:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'apply_ocr' ) )
    def _a ( self ) -> Dict:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def _a ( self ) -> Dict:
pass
def _a ( self ) -> Optional[Any]:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
# Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , A_ )
self.assertIsInstance(encoding.boxes , A_ )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> int:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
# Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> List[str]:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
# Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> Any:
# with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
        image = Image.open(ds[0]['file'] ).convert('RGB' )
        encoding = image_processing(image , return_tensors='pt' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A_ )
self.assertListEqual(encoding.boxes , A_ )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
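# Hedged usage sketch of the two OCR modes exercised above (helper name illustrative):
# with apply_ocr=True the processor returns OCR'd words and normalized boxes alongside
# pixel values; with apply_ocr=False it returns pixel values only.
def _ocr_modes_sketch(image ):
    with_ocr = LayoutLMvaImageProcessor(apply_ocr=True )(image , return_tensors='pt' )
    without_ocr = LayoutLMvaImageProcessor(apply_ocr=False )(image , return_tensors='pt' )
    return with_ocr.words , with_ocr.boxes , without_ocr.pixel_values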
| 701 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def get_flax_param( t5x_checkpoint_path ):
    flax_params = checkpoints.load_tax_checkpoint(t5x_checkpoint_path )
    flax_params = flatten_dict(flax_params )
    return flax_params
def rename_and_convert_flax_params( flax_dict ):
    converted_dict = {}
    CONVERSION_MAPPING = {
        'token_embedder': 'embeddings',
        'encoder_norm': 'layernorm',
        'kernel': 'weight',
        '.out': '.output',
        'scale': 'weight',
        'embedders_0.pos_embedding': 'row_embedder.weight',
        'embedders_1.pos_embedding': 'column_embedder.weight',
    }
    DECODER_CONVERSION_MAPPING = {
        'query': 'attention.query',
        'key': 'attention.key',
        'value': 'attention.value',
        'output.dense': 'output',
        'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
        'pre_self_attention_layer_norm': 'self_attention.layer_norm',
        'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
        'mlp.': 'mlp.DenseReluDense.',
        'pre_mlp_layer_norm': 'mlp.layer_norm',
        'self_attention.o': 'self_attention.attention.o',
        'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
        'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
        'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.logits_dense.weight': 'decoder.lm_head.weight',
    }
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = '.'.join(key[1:] )
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old , new )
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old , new )
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)' , r'layer.\1' , new_key )
                new_key = new_key.replace('encoder' , 'encoder.encoder' )
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)' , r'layer.\1' , new_key )
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T )
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key] )
    return converted_torch_dict
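# Two details of the conversion above, made explicit:
# - T5X/Flax stores dense kernels as (in_features, out_features) while torch.nn.Linear
#   expects (out_features, in_features), hence the `.T` on every non-embedding matrix;
#   embedding tables are already (vocab_size, hidden_size) and are copied untransposed.
# - The layer-number rewrite is a plain regex substitution, for example:
#     re.sub(r'layers_(\d+)' , r'layer.\1' , 'encoder.layers_3.mlp')  ->  'encoder.layer.3.mlp'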
def convert_pixastruct_original_pytorch_checkpoint_to_hf( t5x_checkpoint_path , pytorch_dump_folder_path , use_large=False , is_vqa=False ):
    flax_params = get_flax_param(t5x_checkpoint_path )
    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
        decoder_config = PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=is_vqa )
    model = PixaStructForConditionalGeneration(config )
    torch_params = rename_and_convert_flax_params(flax_params )
    model.load_state_dict(torch_params )
    tokenizer = AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor , tokenizer=tokenizer )
    if use_large:
        processor.image_processor.max_patches = 40_96
    processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
    print('Model saved in {}'.format(pytorch_dump_folder_path ) )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
parser.add_argument('--is_vqa', action='store_true', help='Use large model.')
args = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 682 | 0 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Union[str, Any]:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =seq_length
__UpperCamelCase =is_training
__UpperCamelCase =use_input_mask
__UpperCamelCase =use_token_type_ids
__UpperCamelCase =use_labels
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =type_vocab_size
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =num_labels
__UpperCamelCase =num_choices
__UpperCamelCase =scope
def _a ( self ) -> Optional[int]:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self ) -> Any:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =DistilBertModel(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , A_ )
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =DistilBertForMaskedLM(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =DistilBertForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(
A_ , attention_mask=A_ , start_positions=A_ , end_positions=A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ ) -> int:
__UpperCamelCase =self.num_labels
__UpperCamelCase =DistilBertForSequenceClassification(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ ) -> Dict:
__UpperCamelCase =self.num_labels
__UpperCamelCase =DistilBertForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =self.num_choices
__UpperCamelCase =DistilBertForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase =model(
A_ , attention_mask=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCAmelCase__ : Optional[int] = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : Optional[Any] = True
    def setUp( self ):
        self.model_tester = DistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
def _a ( self ) -> List[Any]:
self.config_tester.run_common_tests()
    def _a ( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )
    def _a ( self ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )
    def _a ( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )
    def _a ( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )
    def _a ( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )
    def _a ( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )
@slow
def _a ( self ) -> List[Any]:
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
def _a ( self ) -> Union[str, Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , 'traced_model.pt' ) )
                loaded = torch.jit.load(os.path.join(tmp , 'traced_model.pt' ) , map_location=torch_device )
                loaded(inputs_dict['input_ids'].to(torch_device ) , inputs_dict['attention_mask'].to(torch_device ) )
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> Tuple:
__UpperCamelCase =DistilBertModel.from_pretrained('distilbert-base-uncased' )
__UpperCamelCase =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__UpperCamelCase =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__UpperCamelCase =model(A_ , attention_mask=A_ )[0]
__UpperCamelCase =torch.Size((1, 11, 768) )
self.assertEqual(output.shape , A_ )
__UpperCamelCase =torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A_ , atol=1E-4 ) )
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_A = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 682 | 0 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flatten_dict(SCREAMING_SNAKE_CASE__ )
return flax_params
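# For reference: `flatten_dict` turns the nested checkpoint tree into a dict
# keyed by tuples of path components, e.g. {'target': {'encoder': {'kernel': k}}}
# becomes {('target', 'encoder', 'kernel'): k}. The tuple paths are joined with
# '.' (dropping the leading 'target') inside the renaming function below.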
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase ={}
__UpperCamelCase ={
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
__UpperCamelCase ={
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
__UpperCamelCase ='.'.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
__UpperCamelCase =new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
__UpperCamelCase =new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
__UpperCamelCase =re.sub(r'layers_(\d+)' , r'layer.\1' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =new_key.replace('encoder' , 'encoder.encoder' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
__UpperCamelCase =re.sub(r'layers_(\d+)' , r'layer.\1' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flax_dict[key]
__UpperCamelCase ={}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
__UpperCamelCase =torch.from_numpy(converted_dict[key].T )
else:
__UpperCamelCase =torch.from_numpy(converted_dict[key] )
return converted_torch_dict
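# Note on the transposes above: flax Dense kernels are stored as
# (in_features, out_features) while torch.nn.Linear.weight expects
# (out_features, in_features), so non-embedding tensors are transposed;
# embedding tables ('embed_tokens' / 'embedder') keep their original layout.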
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : str=False ):
__UpperCamelCase =get_flax_param(SCREAMING_SNAKE_CASE__ )
if not use_large:
__UpperCamelCase =PixaStructVisionConfig()
__UpperCamelCase =PixaStructTextConfig()
else:
__UpperCamelCase =PixaStructVisionConfig(
hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
__UpperCamelCase =PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
__UpperCamelCase =PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =PixaStructForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =rename_and_convert_flax_params(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
__UpperCamelCase =PixaStructImageProcessor()
__UpperCamelCase =PixaStructProcessor(image_processor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
if use_large:
__UpperCamelCase =40_96
__UpperCamelCase =True
# mkdir if needed
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
print('Model saved in {}'.format(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
parser.add_argument('--is_vqa', action='store_true', help='Convert a VQA (visual question answering) checkpoint.')
_A = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa
)
| 703 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Tuple:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =99
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =37
__UpperCamelCase ='gelu'
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase =None
def _a ( self ) -> Tuple:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =TFRoFormerModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> int:
__UpperCamelCase =True
__UpperCamelCase =TFRoFormerForCausalLM(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =TFRoFormerForMaskedLM(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Dict:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFRoFormerForSequenceClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFRoFormerForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFRoFormerForTokenClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Union[str, Any]:
__UpperCamelCase =TFRoFormerForQuestionAnswering(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.prepare_config_and_inputs()
((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) =config_and_inputs
__UpperCamelCase ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Tuple = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Tuple = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _a ( self ) -> str:
__UpperCamelCase =TFRoFormerModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , hidden_size=37 )
def _a ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Dict:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(A_ )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> List[str]:
__UpperCamelCase =TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
__UpperCamelCase =tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase =model(A_ )[0]
# TODO Replace vocab size
__UpperCamelCase =50000
__UpperCamelCase =[1, 6, vocab_size]
self.assertEqual(output.shape , A_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__UpperCamelCase =tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1E-4 )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : int = 1e-4
def _a ( self ) -> int:
__UpperCamelCase =tf.constant([[4, 10]] )
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__UpperCamelCase =emba(input_ids.shape )
__UpperCamelCase =tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
def _a ( self ) -> int:
__UpperCamelCase =tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
__UpperCamelCase =emba.weight[:3, :5]
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = 1e-4
def _a ( self ) -> List[Any]:
# 2,12,16,64
__UpperCamelCase =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCamelCase =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
__UpperCamelCase =embed_positions([2, 16, 768] )[None, None, :, :]
__UpperCamelCase , __UpperCamelCase =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
A_ , A_ , A_ )
__UpperCamelCase =tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
__UpperCamelCase =tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
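# Standalone numeric check of the sinusoidal row asserted above: for position 1
# and embedding_dim 6, the layout is sin(1 / 10000 ** (2 * i / d)) over the
# first half of the dimensions, then cos of the same angles over the second
# half. Pure-math sketch, no TensorFlow required; runs only when executed
# directly.
if __name__ == "__main__":
    import math

    _d = 6
    _angles = [1 / 10000 ** (2 * i / _d) for i in range(_d // 2)]
    _row = [math.sin(a) for a in _angles] + [math.cos(a) for a in _angles]
    print([round(v, 4) for v in _row])  # [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0]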
| 682 | 0 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Any=10 , SCREAMING_SNAKE_CASE__ : List[Any]=1_00 , SCREAMING_SNAKE_CASE__ : Optional[int]=10_26 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Tuple="data/tokenized_stories_train_wikitext103.jbl" , SCREAMING_SNAKE_CASE__ : Dict="igf_context_pairs.jbl" , ):
set_seed(3 )
# generate train_data and objective_set
__UpperCamelCase , __UpperCamelCase =generate_datasets(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , number=SCREAMING_SNAKE_CASE__ , min_len=10_26 , trim=SCREAMING_SNAKE_CASE__ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
__UpperCamelCase =torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
# load pretrained model
__UpperCamelCase =load_gpta('gpt2' ).to(SCREAMING_SNAKE_CASE__ )
print('computing perplexity on objective set' )
__UpperCamelCase =compute_perplexity(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).item()
print('perplexity on objective set:' , SCREAMING_SNAKE_CASE__ )
# collect igf pairs and save them to igf_data_file (default: igf_context_pairs.jbl)
collect_objective_set(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
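# Side note on the number printed above: perplexity is the exponential of the
# mean token-level cross-entropy, which `compute_perplexity` is assumed to
# average over the objective set. Quick numeric illustration (runs only when
# executed directly; the loss value is made up):
if __name__ == "__main__":
    import math

    _mean_nll = 3.2  # illustrative mean negative log-likelihood in nats/token
    print(f"perplexity = {math.exp(_mean_nll):.1f}")  # perplexity = 24.5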
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any]=15 , SCREAMING_SNAKE_CASE__ : Dict=1_28 , SCREAMING_SNAKE_CASE__ : List[str]=1_00 , SCREAMING_SNAKE_CASE__ : List[Any]="igf_model.pt" , ):
set_seed(42 )
# Load pre-trained model
__UpperCamelCase =GPTaLMHeadModel.from_pretrained('gpt2' )
# Initialize secondary learner to use embedding weights of model
__UpperCamelCase =SecondaryLearner(SCREAMING_SNAKE_CASE__ )
# Train secondary learner
__UpperCamelCase =train_secondary_learner(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , max_epochs=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , eval_freq=1_00 , igf_model_path=SCREAMING_SNAKE_CASE__ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int]=32 , SCREAMING_SNAKE_CASE__ : Optional[int]=10_00 , SCREAMING_SNAKE_CASE__ : Optional[int]=16 , SCREAMING_SNAKE_CASE__ : Dict=1.0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=recopy_gpta , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=10 , SCREAMING_SNAKE_CASE__ : Union[str, Any]="gpt2_finetuned.pt" , ):
__UpperCamelCase =torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
__UpperCamelCase =RandomSampler(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =DataLoader(SCREAMING_SNAKE_CASE__ , sampler=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =max_steps // (len(SCREAMING_SNAKE_CASE__ )) + 1
__UpperCamelCase =0
__UpperCamelCase =torch.zeros((1, context_len) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =recopy_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
model.train()
if secondary_learner is not None:
secondary_learner.to(SCREAMING_SNAKE_CASE__ )
secondary_learner.eval()
__UpperCamelCase =[]
__UpperCamelCase =0
__UpperCamelCase =[]
__UpperCamelCase =[]
# Compute the performance of the transformer model at the beginning
__UpperCamelCase =compute_perplexity(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
test_perps.append(SCREAMING_SNAKE_CASE__ )
print('Test perplexity, step' , SCREAMING_SNAKE_CASE__ , ':' , SCREAMING_SNAKE_CASE__ )
for epoch in range(int(SCREAMING_SNAKE_CASE__ ) ):
for step, example in enumerate(SCREAMING_SNAKE_CASE__ ):
torch.cuda.empty_cache()
__UpperCamelCase =random.randint(0 , example.size(2 ) - context_len - 1 )
__UpperCamelCase =example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
__UpperCamelCase =model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =True
if secondary_learner is not None:
__UpperCamelCase =secondary_learner.forward(
torch.tensor(SCREAMING_SNAKE_CASE__ , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(SCREAMING_SNAKE_CASE__ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
__UpperCamelCase =-1
if predicted_q < threshold:
__UpperCamelCase =False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
__UpperCamelCase =outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
__UpperCamelCase =0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
__UpperCamelCase =compute_perplexity(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
test_perps.append(SCREAMING_SNAKE_CASE__ )
print('Test perplexity, step' , SCREAMING_SNAKE_CASE__ , ':' , SCREAMING_SNAKE_CASE__ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE__ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
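# Stand-alone sketch of the filtering loop inside `finetune` above: contexts
# whose predicted information gain clears the threshold are accumulated, and
# one optimizer step is taken per full batch. All names are illustrative.
def _filter_and_count_steps(scores, threshold, batch_size):
    batch, steps = [], 0
    for score in scores:
        if score >= threshold:  # keep only "informative" contexts
            batch.append(score)
        if len(batch) == batch_size:  # full batch -> one lm_optimizer.step()
            steps += 1
            batch = []
    return steps

assert _filter_and_count_steps([0.2, 0.9, 0.8, 0.1, 0.95, 0.7], 0.5, 2) == 2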
def _UpperCAmelCase ( ):
__UpperCamelCase =argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task' )
# Required parameters
parser.add_argument(
'--data_dir' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='The input data dir. Should contain data files for WikiText.' , )
parser.add_argument(
'--model_name_or_path' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--data_file' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help=(
'A jbl file containing tokenized data which can be split as objective dataset, '
'train_dataset and test_dataset.'
) , )
parser.add_argument(
'--igf_data_file' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help='A jbl file containing the context and information gain pairs to train secondary learner.' , )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='The output directory where the final fine-tuned model is stored.' , )
parser.add_argument(
'--tokenizer_name' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help='A seed for reproducible training.' )
parser.add_argument(
'--context_len' , default=32 , type=SCREAMING_SNAKE_CASE__ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--size_objective_set' , default=1_00 , type=SCREAMING_SNAKE_CASE__ , help='number of articles that are long enough to be used as our objective set' , )
parser.add_argument(
'--eval_freq' , default=1_00 , type=SCREAMING_SNAKE_CASE__ , help='secondary model evaluation is triggered at eval_freq' )
parser.add_argument('--max_steps' , default=10_00 , type=SCREAMING_SNAKE_CASE__ , help='To calculate training epochs' )
parser.add_argument(
'--secondary_learner_batch_size' , default=1_28 , type=SCREAMING_SNAKE_CASE__ , help='batch size of training data for secondary learner' , )
parser.add_argument(
'--batch_size' , default=16 , type=SCREAMING_SNAKE_CASE__ , help='batch size of training data of language model(gpt2) ' )
parser.add_argument(
'--eval_interval' , default=10 , type=SCREAMING_SNAKE_CASE__ , help=(
'decay the selectivity of our secondary learner filter from '
'1 standard deviation above average to 1 below average after 10 batches'
) , )
parser.add_argument(
'--number' , default=1_00 , type=SCREAMING_SNAKE_CASE__ , help='The number of examples split to be used as objective_set/test_data' )
parser.add_argument(
'--min_len' , default=10_26 , type=SCREAMING_SNAKE_CASE__ , help='The minimum length of the article to be used as objective set' )
parser.add_argument(
'--secondary_learner_max_epochs' , default=15 , type=SCREAMING_SNAKE_CASE__ , help='number of epochs to train secondary learner' )
parser.add_argument('--trim' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help='truncate the example if it exceeds context length' )
parser.add_argument(
'--threshold' , default=1.0 , type=SCREAMING_SNAKE_CASE__ , help=(
'The threshold value used by secondary learner to filter the train_data and allow only'
' informative data as input to the model'
) , )
parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=SCREAMING_SNAKE_CASE__ , help='finetuned_model_name' )
parser.add_argument(
'--recopy_model' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=SCREAMING_SNAKE_CASE__ , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , )
# Load train data for secondary learner
__UpperCamelCase =joblib.load('data/IGF_values.jbl' )
# Train secondary learner
__UpperCamelCase =training_secondary_learner(
SCREAMING_SNAKE_CASE__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path='igf_model.pt' , )
# load pretrained gpt2 model
__UpperCamelCase =GPTaLMHeadModel.from_pretrained('gpt2' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
__UpperCamelCase , __UpperCamelCase =generate_datasets(
context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=1_00 , min_len=10_26 , trim=SCREAMING_SNAKE_CASE__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=SCREAMING_SNAKE_CASE__ , secondary_learner=SCREAMING_SNAKE_CASE__ , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , )
if __name__ == "__main__":
main()
| 704 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ = None ) -> None:
if components is None:
__UpperCamelCase =[]
__UpperCamelCase =list(A_ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(A_ , self.__components ) ) + ")"
def __add__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] + other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else:
raise Exception('must have the same size' )
def __sub__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] - other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , A_ ) -> Vector:
...
@overload
def __mul__( self , A_ ) -> float:
...
def __mul__( self , A_ ) -> float | Vector:
if isinstance(A_ , (float, int) ):
__UpperCamelCase =[c * other for c in self.__components]
return Vector(A_ )
elif isinstance(A_ , A_ ) and len(self ) == len(A_ ):
__UpperCamelCase =len(self )
__UpperCamelCase =[self.__components[i] * other.component(A_ ) for i in range(A_ )]
return sum(A_ )
else: # error case
raise Exception('invalid operand!' )
def _a ( self ) -> Vector:
return Vector(self.__components )
def _a ( self , A_ ) -> float:
if isinstance(A_ , A_ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def _a ( self , A_ , A_ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
__UpperCamelCase =value
def _a ( self ) -> float:
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__UpperCamelCase =[c**2 for c in self.__components]
return math.sqrt(sum(A_ ) )
def _a ( self , A_ , A_ = False ) -> float:
__UpperCamelCase =self * other
__UpperCamelCase =self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
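# Worked check of the formula used in the angle method above:
# cos(theta) = (x . y) / (|x| * |y|). For x = (1, 0) and y = (1, 1),
# cos(theta) = 1 / sqrt(2), i.e. 45 degrees. Runs only when executed directly.
if __name__ == "__main__":
    _dot = 1 * 1 + 0 * 1
    _den = math.hypot(1, 0) * math.hypot(1, 1)
    print(math.degrees(math.acos(_dot / _den)))  # ~45.0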
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return Vector([0] * dimension )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ))
__UpperCamelCase =[0] * dimension
__UpperCamelCase =1
return Vector(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Vector , SCREAMING_SNAKE_CASE__ : Vector ):
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ))
)
return x * scalar + y
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )]
return Vector(SCREAMING_SNAKE_CASE__ )
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_ , A_ ) -> None:
__UpperCamelCase =matrix
__UpperCamelCase =w
__UpperCamelCase =h
def __str__( self ) -> str:
__UpperCamelCase =''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] + other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
def __sub__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] - other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , A_ ) -> Matrix:
...
@overload
def __mul__( self , A_ ) -> Vector:
...
def __mul__( self , A_ ) -> Vector | Matrix:
if isinstance(A_ , A_ ): # matrix-vector
if len(A_ ) == self.__width:
__UpperCamelCase =zero_vector(self.__height )
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] * other.component(A_ )
for j in range(self.__width )
]
ans.change_component(A_ , sum(A_ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(A_ , (int, float) ): # matrix-scalar
__UpperCamelCase =[
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(A_ , self.__width , self.__height )
return None
def _a ( self ) -> int:
return self.__height
def _a ( self ) -> int:
return self.__width
def _a ( self , A_ , A_ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('component: indices out of bounds' )
def _a ( self , A_ , A_ , A_ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
__UpperCamelCase =value
else:
raise Exception('change_component: indices out of bounds' )
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__UpperCamelCase =self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(A_ ) ):
__UpperCamelCase =minor[i][:y] + minor[i][y + 1 :]
return Matrix(A_ , self.__width - 1 , self.__height - 1 ).determinant()
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(A_ , A_ )
else:
raise Exception('Indices out of bounds' )
def _a ( self ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__UpperCamelCase =[
self.__matrix[0][y] * self.cofactor(0 , A_ ) for y in range(self.__width )
]
return sum(A_ )
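# Minimal stand-alone Laplace expansion mirroring the recursive branch of the
# determinant above (plain Python lists; for illustration only, the class does
# not use this helper).
def _det(m):
    if len(m) == 1:
        return m[0][0]
    return sum(
        (-1) ** j * m[0][j] * _det([row[:j] + row[j + 1 :] for row in m[1:]])
        for j in range(len(m))
    )

assert _det([[1, 2], [3, 4]]) == -2
assert _det([[2, 0, 0], [0, 3, 0], [0, 0, 4]]) == 24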
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =[[0] * n for _ in range(SCREAMING_SNAKE_CASE__ )]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[
[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )
]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 682 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=64 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=[1, 16, 4, 4] , A_=None , ) -> Any:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =is_training
__UpperCamelCase =use_labels
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =scope
__UpperCamelCase =backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
__UpperCamelCase =(self.image_size // 32) ** 2
__UpperCamelCase =num_patches + 1
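# e.g. with image_size=64 and the default output stride of 32:
# (64 // 32) ** 2 == 4 patches, plus the [CLS] token -> seq_length == 5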
def _a ( self ) -> str:
__UpperCamelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =self.get_config()
return config, pixel_values, labels
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ={
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=A_ , )
def _a ( self , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModel(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =self.type_sequence_label_size
__UpperCamelCase =ViTHybridForImageClassification(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =config_and_inputs
__UpperCamelCase ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def _a ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self ) -> List[str]:
pass
def _a ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def _a ( self ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase =[*signature.parameters.keys()]
__UpperCamelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =_config_zero_init(A_ )
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(config=A_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
__UpperCamelCase =[f'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def _a ( self ) -> int:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =ViTHybridModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _UpperCAmelCase ( ):
__UpperCamelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ) -> Union[str, Any]:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self ) -> str:
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
A_ )
__UpperCamelCase =self.default_image_processor
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
__UpperCamelCase =model(**A_ )
# verify the logits
__UpperCamelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
__UpperCamelCase =torch.tensor([-1.9090, -0.4993, -0.2389] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self ) -> Optional[int]:
__UpperCamelCase =ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' )
__UpperCamelCase =model(**A_ )
__UpperCamelCase =outputs.logits
# model predicts one of the 1000 ImageNet classes
__UpperCamelCase =logits.argmax(-1 ).item()
self.assertEqual(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
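# Hedged sketch of the logits -> label lookup used above, with a toy id2label
# mapping rather than the real 1000-class ImageNet head; runs only when this
# file is executed directly.
if __name__ == "__main__":
    import torch

    _logits = torch.tensor([[0.1, 2.5, -0.3]])
    _id2label = {0: "cat", 1: "dog", 2: "bird"}
    print(_id2label[_logits.argmax(-1).item()])  # dog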
| 705 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = '▁'
_A = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
_A = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
_A = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
_A = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
UpperCAmelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Any = ["input_ids", "attention_mask"]
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : List[int] = []
def __init__( self , A_ , A_ , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<pad>" , A_="<unk>" , A_="m2m100" , A_ = None , A_=8 , **A_ , ) -> None:
__UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs
__UpperCamelCase =language_codes
__UpperCamelCase =FAIRSEQ_LANGUAGE_CODES[language_codes]
__UpperCamelCase ={lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
__UpperCamelCase =kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(A_ )
for lang_code in fairseq_language_code
if self.get_lang_token(A_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=A_ , tgt_lang=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , unk_token=A_ , pad_token=A_ , language_codes=A_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=A_ , **A_ , )
__UpperCamelCase =vocab_file
__UpperCamelCase =load_json(A_ )
__UpperCamelCase ={v: k for k, v in self.encoder.items()}
__UpperCamelCase =spm_file
__UpperCamelCase =load_spm(A_ , self.sp_model_kwargs )
__UpperCamelCase =len(self.encoder )
__UpperCamelCase ={
self.get_lang_token(A_ ): self.encoder_size + i for i, lang_code in enumerate(A_ )
}
__UpperCamelCase ={lang_code: self.encoder_size + i for i, lang_code in enumerate(A_ )}
__UpperCamelCase ={v: k for k, v in self.lang_token_to_id.items()}
__UpperCamelCase =src_lang if src_lang is not None else 'en'
__UpperCamelCase =tgt_lang
__UpperCamelCase =self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
__UpperCamelCase =num_madeup_words
@property
def _a ( self ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _a ( self ) -> str:
return self._src_lang
@src_lang.setter
def _a ( self , A_ ) -> None:
__UpperCamelCase =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a ( self , A_ ) -> List[str]:
return self.sp_model.encode(A_ , out_type=A_ )
def _a ( self , A_ ) -> Optional[Any]:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(A_ , self.encoder[self.unk_token] )
def _a ( self , A_ ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(A_ , self.unk_token )
def _a ( self , A_ ) -> List[Any]:
__UpperCamelCase =[]
__UpperCamelCase =''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
__UpperCamelCase =[]
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
__UpperCamelCase =[1] * len(self.prefix_tokens )
__UpperCamelCase =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A_ )) + suffix_ones
return prefix_ones + ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
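# e.g. with one prefix token (the source-language code) and one suffix token
# (</s>), a 3-token sequence without a pair yields the mask [1, 0, 0, 0, 1]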
def _a ( self , A_ , A_ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a ( self ) -> Dict:
__UpperCamelCase ={self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
__UpperCamelCase =self.__dict__.copy()
__UpperCamelCase =None
return state
def __setstate__( self , A_ ) -> None:
__UpperCamelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase ={}
__UpperCamelCase =load_spm(self.spm_file , self.sp_model_kwargs )
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
__UpperCamelCase =Path(A_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , A_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(A_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , A_ )
elif not os.path.isfile(self.spm_file ):
with open(A_ , 'wb' ) as fi:
__UpperCamelCase =self.sp_model.serialized_model_proto()
fi.write(A_ )
return (str(A_ ), str(A_ ))
def _a ( self , A_ , A_ = "en" , A_ = None , A_ = "ro" , **A_ , ) -> BatchEncoding:
__UpperCamelCase =src_lang
__UpperCamelCase =tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(A_ , A_ , **A_ )
def _a ( self , A_ , A_ , A_ , **A_ ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__UpperCamelCase =src_lang
__UpperCamelCase =self(A_ , add_special_tokens=A_ , **A_ )
__UpperCamelCase =self.get_lang_id(A_ )
__UpperCamelCase =tgt_lang_id
return inputs
def _a ( self ) -> List[Any]:
self.set_src_lang_special_tokens(self.src_lang )
def _a ( self ) -> Dict:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> str:
return self.lang_code_to_token[lang]
def _a ( self , A_ ) -> int:
__UpperCamelCase =self.get_lang_token(A_ )
return self.lang_token_to_id[lang_token]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict[str, Any] ):
__UpperCamelCase =sentencepiece.SentencePieceProcessor(**SCREAMING_SNAKE_CASE__ )
spm.Load(str(SCREAMING_SNAKE_CASE__ ) )
return spm
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'r' ) as f:
return json.load(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , indent=2 )
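# Round-trip sketch of the JSON helpers' behavior above, using the plain json
# module and a temporary file (illustrative only; runs when executed directly).
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile("w+", suffix=".json") as f:
        json.dump({"hello": 7}, f, indent=2)
        f.flush()
        f.seek(0)
        assert json.load(f) == {"hello": 7}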
| 682 | 0 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = RoCBertTokenizer
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : int = filter_non_english
def _a ( self ) -> Optional[Any]:
super().setUp()
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
__UpperCamelCase ={}
__UpperCamelCase ={}
for i, value in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =i
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(A_ , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Any:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__UpperCamelCase ={}
for i, token in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =RoCBertWordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _a ( self ) -> Dict:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _a ( self ) -> Tuple:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _a ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
__UpperCamelCase =self.get_rust_tokenizer()
self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _a ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
__UpperCamelCase =tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
__UpperCamelCase =tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False
__UpperCamelCase =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _a ( self ) -> List[str]:
__UpperCamelCase =['的', '人', '有']
__UpperCamelCase =''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =True
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =False
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCamelCase =[
f'##{token}' if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
@slow
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.encode('你好' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode('你是谁' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase ='你好,你是谁'
__UpperCamelCase =tokenizer.tokenize(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_shape_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_pronunciation_ids(A_ )
__UpperCamelCase =tokenizer.prepare_for_model(
A_ , A_ , A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode_plus(A_ , add_special_tokens=A_ )
self.assertEqual(A_ , A_ )
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def replace_key_with_offset( key : str , offset : int , original_name : str , new_name : str ):
    to_find =original_name.split('.' )[0]
    key_list =key.split('.' )
    orig_block_num =int(key_list[key_list.index(to_find ) - 2] )
    layer_num =int(key_list[key_list.index(to_find ) - 1] )
    new_block_num =orig_block_num - offset
    key =key.replace(F'{orig_block_num}.{layer_num}.{original_name}' , F'block.{new_block_num}.{layer_num}.{new_name}' )
    return key
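# Worked example (hypothetical key, assuming offset=1 as produced by rename_keys below):
#   replace_key_with_offset('poolformer.encoder.2.0.mlp.fc1.weight' , 1 , 'mlp.fc1' , 'output.conv1' )
#   -> 'poolformer.encoder.block.1.0.output.conv1.weight'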
def rename_keys( state_dict ):
    new_state_dict =OrderedDict()
    total_embed_found , patch_emb_offset =0, 0
    for key, value in state_dict.items():
        if key.startswith('network' ):
            key =key.replace('network' , 'poolformer.encoder' )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('bias' ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace =key[: key.find('proj' )]
            key =key.replace(to_replace , F'patch_embeddings.{total_embed_found}.' )
            key =key.replace('proj' , 'projection' )
            if key.endswith('bias' ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key ='poolformer.encoder.' + key
        if "mlp.fc1" in key:
            key =replace_key_with_offset(key , patch_emb_offset , 'mlp.fc1' , 'output.conv1' )
        if "mlp.fc2" in key:
            key =replace_key_with_offset(key , patch_emb_offset , 'mlp.fc2' , 'output.conv2' )
        if "norm1" in key:
            key =replace_key_with_offset(key , patch_emb_offset , 'norm1' , 'before_norm' )
        if "norm2" in key:
            key =replace_key_with_offset(key , patch_emb_offset , 'norm2' , 'after_norm' )
        if "layer_scale_1" in key:
            key =replace_key_with_offset(key , patch_emb_offset , 'layer_scale_1' , 'layer_scale_1' )
        if "layer_scale_2" in key:
            key =replace_key_with_offset(key , patch_emb_offset , 'layer_scale_2' , 'layer_scale_2' )
        if "head" in key:
            key =key.replace('head' , 'classifier' )
        new_state_dict[key] =value
    return new_state_dict
def prepare_img( ):
    url ='http://images.cocodataset.org/val2017/000000039769.jpg'
    image =Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_poolformer_checkpoint( model_name : str , checkpoint_path : str , pytorch_dump_folder_path : str ):
    config =PoolFormerConfig()
    # set attributes based on model_name
    repo_id ='huggingface/label-files'
    size =model_name[-3:]
    config.num_labels =1000
    filename ='imagenet-1k-id2label.json'
    expected_shape =(1, 1000)
    # set config attributes
    id2label =json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label ={int(k ): v for k, v in id2label.items()}
    config.id2label =id2label
    config.label2id ={v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths =[2, 2, 6, 2]
        config.hidden_sizes =[64, 128, 320, 512]
        config.mlp_ratio =4.0
        crop_pct =0.9
    elif size == "s24":
        config.depths =[4, 4, 12, 4]
        config.hidden_sizes =[64, 128, 320, 512]
        config.mlp_ratio =4.0
        crop_pct =0.9
    elif size == "s36":
        config.depths =[6, 6, 18, 6]
        config.hidden_sizes =[64, 128, 320, 512]
        config.mlp_ratio =4.0
        config.layer_scale_init_value =1E-6
        crop_pct =0.9
    elif size == "m36":
        config.depths =[6, 6, 18, 6]
        config.hidden_sizes =[96, 192, 384, 768]
        config.mlp_ratio =4.0
        config.layer_scale_init_value =1E-6
        crop_pct =0.95
    elif size == "m48":
        config.depths =[8, 8, 24, 8]
        config.hidden_sizes =[96, 192, 384, 768]
        config.mlp_ratio =4.0
        config.layer_scale_init_value =1E-6
        crop_pct =0.95
    else:
        raise ValueError(F'Size {size} not supported' )
    # load image processor
    image_processor =PoolFormerImageProcessor(crop_pct=crop_pct )
    # Prepare image
    image =prepare_img()
    pixel_values =image_processor(images=image , return_tensors='pt' ).pixel_values
    logger.info(F'Converting model {model_name}...' )
    # load original state dict
    state_dict =torch.load(checkpoint_path , map_location=torch.device('cpu' ) )
    # rename keys
    state_dict =rename_keys(state_dict )
    # create HuggingFace model and load state dict
    model =PoolFormerForImageClassification(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Define image processor
    image_processor =PoolFormerImageProcessor(crop_pct=crop_pct )
    pixel_values =image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values
    # forward pass
    outputs =model(pixel_values )
    logits =outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice =torch.tensor([-0.3045, -0.6758, -0.4869] )
    elif size == "s24":
        expected_slice =torch.tensor([0.4402, -0.1374, -0.8045] )
    elif size == "s36":
        expected_slice =torch.tensor([-0.6080, -0.5133, -0.5898] )
    elif size == "m36":
        expected_slice =torch.tensor([0.3952, 0.2263, -1.2668] )
    elif size == "m48":
        expected_slice =torch.tensor([0.1167, -0.0656, -0.3423] )
    else:
        raise ValueError(F'Size {size} not supported' )
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , expected_slice , atol=1E-2 )
    # finally, save model and image processor
    logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_A = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
import numpy as np
def sigmoid( vector : np.ndarray ):
    return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit( vector : np.ndarray ):
    return vector * sigmoid(vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
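    # Hedged demo (illustrative inputs): sigmoid(0) = 0.5, and the SiLU at x = 2 is
    # 2 * sigmoid(2) ≈ 1.76159416.
    print(sigmoid(np.array([0.0, 2.0] ) ) )
    print(sigmoid_linear_unit(np.array([0.0, 2.0] ) ) )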
from math import asin, atan, cos, radians, sin, sqrt, tan
_A = 6_378_137.0
_A = 6_356_752.314_245
_A = 637_8137
def haversine_distance( lat_1 : float , lon_1 : float , lat_2 : float , lon_2 : float ):
    flattening =(AXIS_A - AXIS_B) / AXIS_A
    phi_1 =atan((1 - flattening) * tan(radians(lat_1 ) ) )
    phi_2 =atan((1 - flattening) * tan(radians(lat_2 ) ) )
    lambda_1 =radians(lon_1 )
    lambda_2 =radians(lon_2 )
    # Equation
    sin_sq_phi =sin((phi_2 - phi_1) / 2 )
    sin_sq_lambda =sin((lambda_2 - lambda_1) / 2 )
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value =sqrt(sin_sq_phi + (cos(phi_2 ) * cos(phi_1 ) * sin_sq_lambda) )
    return 2 * RADIUS * asin(h_value )
if __name__ == "__main__":
import doctest
doctest.testmod()
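    # Hedged demo (illustrative coordinates): San Francisco (37.774856, -122.424227) to
    # Yosemite (37.864742, -119.537521) should print roughly 254352 metres.
    print(haversine_distance(37.774856 , -122.424227 , 37.864742 , -119.537521 ) )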
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_A = logging.get_logger(__name__)
# General docstring
_A = 'RegNetConfig'
# Base docstring
_A = 'facebook/regnet-y-040'
_A = [1, 1088, 7, 7]
# Image classification docstring
_A = 'facebook/regnet-y-040'
_A = 'tabby, tabby cat'
_A = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , A_ , A_ = 3 , A_ = 1 , A_ = 1 , A_ = "relu" , **A_ , ) -> Dict:
super().__init__(**A_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__UpperCamelCase =tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__UpperCamelCase =tf.keras.layers.ConvaD(
filters=A_ , kernel_size=A_ , strides=A_ , padding='VALID' , groups=A_ , use_bias=A_ , name='convolution' , )
__UpperCamelCase =tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
__UpperCamelCase =ACTaFN[activation] if activation is not None else tf.identity
def _a ( self , A_ ) -> Tuple:
__UpperCamelCase =self.convolution(self.padding(A_ ) )
__UpperCamelCase =self.normalization(A_ )
__UpperCamelCase =self.activation(A_ )
return hidden_state
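# Note: the explicit zero-padding of kernel_size // 2 followed by a 'VALID' convolution above
# reproduces PyTorch's Conv2d(padding=kernel_size // 2), keeping RegNet weights ported from
# PyTorch numerically aligned (see the notebook referenced in __init__).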
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , A_ , **A_ ) -> Tuple:
super().__init__(**A_ )
__UpperCamelCase =config.num_channels
__UpperCamelCase =TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def _a ( self , A_ ) -> Any:
__UpperCamelCase =shape_list(A_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__UpperCamelCase =tf.transpose(A_ , perm=(0, 2, 3, 1) )
__UpperCamelCase =self.embedder(A_ )
return hidden_state
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , A_ , A_ = 2 , **A_ ) -> int:
super().__init__(**A_ )
__UpperCamelCase =tf.keras.layers.ConvaD(
filters=A_ , kernel_size=1 , strides=A_ , use_bias=A_ , name='convolution' )
__UpperCamelCase =tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
def _a ( self , A_ , A_ = False ) -> tf.Tensor:
return self.normalization(self.convolution(A_ ) , training=A_ )
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , A_ , A_ , **A_ ) -> Dict:
super().__init__(**A_ )
__UpperCamelCase =tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' )
__UpperCamelCase =[
tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def _a ( self , A_ ) -> Optional[int]:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__UpperCamelCase =self.pooler(A_ )
for layer_module in self.attention:
__UpperCamelCase =layer_module(A_ )
__UpperCamelCase =hidden_state * pooled
return hidden_state
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ ) -> int:
super().__init__(**A_ )
__UpperCamelCase =in_channels != out_channels or stride != 1
__UpperCamelCase =max(1 , out_channels // config.groups_width )
__UpperCamelCase =(
TFRegNetShortCut(A_ , stride=A_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__UpperCamelCase =[
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.2' ),
]
__UpperCamelCase =ACTaFN[config.hidden_act]
def _a ( self , A_ ) -> Tuple:
__UpperCamelCase =hidden_state
for layer_module in self.layers:
__UpperCamelCase =layer_module(A_ )
__UpperCamelCase =self.shortcut(A_ )
hidden_state += residual
__UpperCamelCase =self.activation(A_ )
return hidden_state
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ ) -> Tuple:
super().__init__(**A_ )
__UpperCamelCase =in_channels != out_channels or stride != 1
__UpperCamelCase =max(1 , out_channels // config.groups_width )
__UpperCamelCase =(
TFRegNetShortCut(A_ , stride=A_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
__UpperCamelCase =[
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(A_ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.3' ),
]
__UpperCamelCase =ACTaFN[config.hidden_act]
def _a ( self , A_ ) -> int:
__UpperCamelCase =hidden_state
for layer_module in self.layers:
__UpperCamelCase =layer_module(A_ )
__UpperCamelCase =self.shortcut(A_ )
hidden_state += residual
__UpperCamelCase =self.activation(A_ )
return hidden_state
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ = 2 , A_ = 2 , **A_ ) -> Dict:
super().__init__(**A_ )
__UpperCamelCase =TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
__UpperCamelCase =[
# downsampling is done in the first layer with stride of 2
layer(A_ , A_ , A_ , stride=A_ , name='layers.0' ),
*[layer(A_ , A_ , A_ , name=f'layers.{i+1}' ) for i in range(depth - 1 )],
]
def _a ( self , A_ ) -> Optional[Any]:
for layer_module in self.layers:
__UpperCamelCase =layer_module(A_ )
return hidden_state
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , A_ , **A_ ) -> Optional[Any]:
super().__init__(**A_ )
__UpperCamelCase =[]
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
A_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
__UpperCamelCase =zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(A_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(A_ , A_ , A_ , depth=A_ , name=f'stages.{i+1}' ) )
def _a ( self , A_ , A_ = False , A_ = True ) -> TFBaseModelOutputWithNoAttention:
__UpperCamelCase =() if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__UpperCamelCase =hidden_states + (hidden_state,)
__UpperCamelCase =stage_module(A_ )
if output_hidden_states:
__UpperCamelCase =hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=A_ , hidden_states=A_ )
@keras_serializable
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = RegNetConfig
def __init__( self , A_ , **A_ ) -> Optional[int]:
super().__init__(**A_ )
__UpperCamelCase =config
__UpperCamelCase =TFRegNetEmbeddings(A_ , name='embedder' )
__UpperCamelCase =TFRegNetEncoder(A_ , name='encoder' )
__UpperCamelCase =tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' )
@unpack_inputs
def _a ( self , A_ , A_ = None , A_ = None , A_ = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
__UpperCamelCase =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase =return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase =self.embedder(A_ , training=A_ )
__UpperCamelCase =self.encoder(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
__UpperCamelCase =encoder_outputs[0]
__UpperCamelCase =self.pooler(A_ )
# Change to NCHW output format have uniformity in the modules
__UpperCamelCase =tf.transpose(A_ , perm=(0, 3, 1, 2) )
__UpperCamelCase =tf.transpose(A_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__UpperCamelCase =tuple([tf.transpose(A_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A_ , pooler_output=A_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = RegNetConfig
UpperCAmelCase__ : Optional[int] = "regnet"
UpperCAmelCase__ : Tuple = "pixel_values"
@property
def _a ( self ) -> int:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
_A = R'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
_A = R'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , A_ , )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , *A_ , **A_ ) -> str:
super().__init__(A_ , *A_ , **A_ )
__UpperCamelCase =TFRegNetMainLayer(A_ , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=A_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _a ( self , A_ , A_ = None , A_ = None , A_=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
__UpperCamelCase =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase =return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase =self.regnet(
pixel_values=A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , A_ , )
class UpperCAmelCase__ ( A_ , A_ ):
"""simple docstring"""
def __init__( self , A_ , *A_ , **A_ ) -> Optional[Any]:
super().__init__(A_ , *A_ , **A_ )
__UpperCamelCase =config.num_labels
__UpperCamelCase =TFRegNetMainLayer(A_ , name='regnet' )
# classification head
__UpperCamelCase =[
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _a ( self , A_ = None , A_ = None , A_ = None , A_ = None , A_=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
__UpperCamelCase =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase =return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase =self.regnet(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
__UpperCamelCase =outputs.pooler_output if return_dict else outputs[1]
__UpperCamelCase =self.classifier[0](A_ )
__UpperCamelCase =self.classifier[1](A_ )
__UpperCamelCase =None if labels is None else self.hf_compute_loss(labels=A_ , logits=A_ )
if not return_dict:
__UpperCamelCase =(logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=A_ , logits=A_ , hidden_states=outputs.hidden_states )
def xnor_gate( input_a : int , input_b : int ):
    return 1 if input_a == input_b else 0
def test_xnor_gate( ):
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort( graph : dict[int, list[int]] , vert : int , visited : list[bool] ):
    visited[vert] =True
    order =[]
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph , neighbour , visited )
    order.append(vert )
    return order
def find_components( reversed_graph : dict[int, list[int]] , vert : int , visited : list[bool] ):
    visited[vert] =True
    component =[vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph , neighbour , visited )
    return component
def strongly_connected_components( graph : dict[int, list[int]] ):
    visited =len(graph ) * [False]
    reversed_graph ={vert: [] for vert in range(len(graph ) )}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert )
    order =[]
    for i, was_visited in enumerate(visited ):
        if not was_visited:
            order += topology_sort(graph , i , visited )
    components_list =[]
    visited =len(graph ) * [False]
    for i in range(len(graph ) ):
        vert =order[len(graph ) - i - 1]
        if not visited[vert]:
            component =find_components(reversed_graph , vert , visited )
            components_list.append(component )
    return components_list
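if __name__ == "__main__":
    # Hedged demo on the sample graphs defined above; expected components (traced by hand):
    print(strongly_connected_components(test_graph_1 ) )  # [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2 ) )  # [[0, 2, 1], [3, 5, 4]]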
def search( list_data : list , key : int , left : int = 0 , right : int = 0 ):
    right =right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
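    # Hedged demo: the function scans both ends inward and returns an index or -1.
    print(search([1, 2, 4, 8] , 4 ) )  # expected: 2
    print(search([5, 3] , 9 ) )  # expected: -1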
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Dict = "blip_text_model"
def __init__( self , A_=30524 , A_=768 , A_=768 , A_=3072 , A_=768 , A_=12 , A_=8 , A_=512 , A_="gelu" , A_=1E-12 , A_=0.0 , A_=0.0 , A_=0.02 , A_=30522 , A_=2 , A_=0 , A_=102 , A_=True , A_=True , **A_ , ) -> Optional[int]:
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , sep_token_id=A_ , **A_ , )
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =encoder_hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =is_decoder
__UpperCamelCase =use_cache
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "blip_vision_model"
def __init__( self , A_=768 , A_=3072 , A_=512 , A_=12 , A_=12 , A_=384 , A_=16 , A_="gelu" , A_=1E-5 , A_=0.0 , A_=1E-10 , **A_ , ) -> Optional[Any]:
super().__init__(**A_ )
__UpperCamelCase =hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =patch_size
__UpperCamelCase =image_size
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_dropout
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : int = "blip"
UpperCAmelCase__ : Optional[int] = True
def __init__( self , A_=None , A_=None , A_=512 , A_=2.6592 , A_=256 , **A_ , ) -> Union[str, Any]:
super().__init__(**A_ )
if text_config is None:
__UpperCamelCase ={}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
if vision_config is None:
__UpperCamelCase ={}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
__UpperCamelCase =BlipTextConfig(**A_ )
__UpperCamelCase =BlipVisionConfig(**A_ )
__UpperCamelCase =self.vision_config.hidden_size
__UpperCamelCase =projection_dim
__UpperCamelCase =logit_scale_init_value
__UpperCamelCase =1.0
__UpperCamelCase =0.02
__UpperCamelCase =image_text_hidden_size
@classmethod
def _a ( cls , A_ , A_ , **A_ ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =copy.deepcopy(self.__dict__ )
__UpperCamelCase =self.text_config.to_dict()
__UpperCamelCase =self.vision_config.to_dict()
__UpperCamelCase =self.__class__.model_type
return output
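# A minimal composition sketch (upstream the classmethod above is `from_text_vision_configs`):
#   text_cfg , vision_cfg =BlipTextConfig(), BlipVisionConfig()
#   blip_cfg =BlipConfig.from_text_vision_configs(text_cfg , vision_cfg )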
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , ) -> List[Any]:
__UpperCamelCase =size if size is not None else {'height': 18, 'width': 18}
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =num_channels
__UpperCamelCase =image_size
__UpperCamelCase =min_resolution
__UpperCamelCase =max_resolution
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =apply_ocr
def _a ( self ) -> Tuple:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =LayoutLMvaImageProcessingTester(self )
@property
def _a ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'apply_ocr' ) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def _a ( self ) -> Dict:
pass
def _a ( self ) -> Optional[Any]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , A_ )
self.assertIsInstance(encoding.boxes , A_ )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> int:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> List[str]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> Any:
# with apply_OCR = True
__UpperCamelCase =LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__UpperCamelCase =Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A_ )
self.assertListEqual(encoding.boxes , A_ )
# with apply_OCR = False
__UpperCamelCase =LayoutLMvaImageProcessor(apply_ocr=A_ )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_A = logging.get_logger(__name__)
@add_end_docstrings(A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ) -> List[Any]:
        super().__init__(*args , **kwargs )
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING )
    def _sanitize_parameters( self , padding=None , truncation=None , top_k=None , **kwargs ) -> List[Any]:
        preprocess_params , postprocess_params ={}, {}
        if padding is not None:
            preprocess_params['padding'] =padding
        if truncation is not None:
            preprocess_params['truncation'] =truncation
        if top_k is not None:
            postprocess_params['top_k'] =top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , image , question = None , **kwargs ) -> Tuple:
        if isinstance(image , (Image.Image, str) ) and isinstance(question , str ):
            inputs ={'image': image, 'question': question}
        else:
            inputs =image
        results =super().__call__(inputs , **kwargs )
        return results
    def preprocess( self , inputs , padding=False , truncation=False ) -> Dict:
        image =load_image(inputs['image'] )
        model_inputs =self.tokenizer(
            inputs['question'] , return_tensors=self.framework , padding=padding , truncation=truncation )
        image_features =self.image_processor(images=image , return_tensors=self.framework )
        model_inputs.update(image_features )
        return model_inputs
    def _forward( self , model_inputs ) -> List[Any]:
        model_outputs =self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ) -> str:
        if top_k > self.model.config.num_labels:
            top_k =self.model.config.num_labels
        if self.framework == "pt":
            probs =model_outputs.logits.sigmoid()[0]
            scores , ids =probs.topk(top_k )
        else:
            raise ValueError(f'Unsupported framework: {self.framework}' )
        scores =scores.tolist()
        ids =ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_A = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : bool = field(default=A_ , metadata={"help": "Whether to use SortishSampler or not."} )
UpperCAmelCase__ : bool = field(
default=A_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=A_ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def _a ( self ) -> Dict:
__UpperCamelCase =super().to_dict()
for k, v in d.items():
if isinstance(A_ , A_ ):
__UpperCamelCase =v.to_dict()
return d
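# A minimal usage sketch (upstream this dataclass is `Seq2SeqTrainingArguments`); `to_dict`
# above serializes any nested `GenerationConfig` so the arguments stay JSON-friendly:
#   args =Seq2SeqTrainingArguments(output_dir='out' , predict_with_generate=True , generation_max_length=128 )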
from typing import Dict, Optional
import numpy as np
import datasets
_A = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_A = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
_A = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union( pred_label , label , num_labels : int , ignore_index : int , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] =new_id
    # turn into Numpy arrays
    pred_label =np.array(pred_label )
    label =np.array(label )
    if reduce_labels:
        label[label == 0] =255
        label =label - 1
        label[label == 254] =255
    mask =label != ignore_index
    mask =np.not_equal(label , ignore_index )
    pred_label =pred_label[mask]
    label =np.array(label )[mask]
    intersect =pred_label[pred_label == label]
    area_intersect =np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label =np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label =np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union =area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels: int, ignore_index: int, label_map=None, reduce_labels: bool = False):
    # accumulate per-class areas over the whole dataset
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels: int, ignore_index: int, nan_to_num=None, label_map=None, reduce_labels: bool = False):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics['mean_iou'] = np.nanmean(iou)
    metrics['mean_accuracy'] = np.nanmean(acc)
    metrics['overall_accuracy'] = all_acc
    metrics['per_category_iou'] = iou
    metrics['per_category_accuracy'] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MeanIoU(datasets.Metric):
    """simple docstring"""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))),
                    'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))),
                }
            ),
            reference_urls=[
                'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
            ],
        )

    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
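# --- Illustrative sketch (not part of the metric above): the histogram trick
# used by `intersect_and_union` can be reproduced with a few lines of plain
# NumPy. The helper name below is hypothetical and for demonstration only.
def sketch_per_class_iou(pred, label, num_labels):
    intersect = pred[pred == label]  # pixels where prediction and ground truth agree
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred = np.histogram(pred, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred + area_label - area_intersect
    return area_intersect / np.maximum(area_union, 1)  # per-class IoU

# e.g. sketch_per_class_iou(np.array([0, 1, 1]), np.array([0, 1, 0]), 2)
# returns array([0.5, 0.5]): each class has 1 agreeing pixel out of 2 in its union.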
| 712 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_A = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class BlipTextConfig(PretrainedConfig):
"""simple docstring"""
UpperCAmelCase__ : Dict = "blip_text_model"
def __init__( self , A_=30524 , A_=768 , A_=768 , A_=3072 , A_=768 , A_=12 , A_=8 , A_=512 , A_="gelu" , A_=1E-12 , A_=0.0 , A_=0.0 , A_=0.02 , A_=30522 , A_=2 , A_=0 , A_=102 , A_=True , A_=True , **A_ , ) -> Optional[int]:
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , sep_token_id=A_ , **A_ , )
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =encoder_hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =is_decoder
__UpperCamelCase =use_cache
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class BlipVisionConfig(PretrainedConfig):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "blip_vision_model"
def __init__( self , A_=768 , A_=3072 , A_=512 , A_=12 , A_=12 , A_=384 , A_=16 , A_="gelu" , A_=1E-5 , A_=0.0 , A_=1E-10 , **A_ , ) -> Optional[Any]:
super().__init__(**A_ )
__UpperCamelCase =hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =patch_size
__UpperCamelCase =image_size
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_dropout
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class BlipConfig(PretrainedConfig):
"""simple docstring"""
UpperCAmelCase__ : int = "blip"
UpperCAmelCase__ : Optional[int] = True
def __init__( self , A_=None , A_=None , A_=512 , A_=2.6592 , A_=256 , **A_ , ) -> Union[str, Any]:
super().__init__(**A_ )
if text_config is None:
__UpperCamelCase ={}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
if vision_config is None:
__UpperCamelCase ={}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
__UpperCamelCase =BlipTextConfig(**A_ )
__UpperCamelCase =BlipVisionConfig(**A_ )
__UpperCamelCase =self.vision_config.hidden_size
__UpperCamelCase =projection_dim
__UpperCamelCase =logit_scale_init_value
__UpperCamelCase =1.0
__UpperCamelCase =0.02
__UpperCamelCase =image_text_hidden_size
@classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
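# --- Illustrative usage sketch: how the composite config above is typically
# built from its two sub-configs (values here are arbitrary examples).
#
# text_cfg = BlipTextConfig(hidden_size=768, num_hidden_layers=12)
# vision_cfg = BlipVisionConfig(hidden_size=768, patch_size=16)
# blip_cfg = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg)
# blip_cfg.to_dict()  # round-trips both sub-configs plus the shared projection fields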
| 682 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> List[Any]:
__UpperCamelCase =tempfile.mkdtemp()
__UpperCamelCase =[
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'的',
'价',
'格',
'是',
'15',
'便',
'alex',
'##andra',
',',
'。',
'-',
't',
'shirt',
]
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
__UpperCamelCase ={
'do_resize': True,
'size': {'height': 224, 'width': 224},
'do_center_crop': True,
'crop_size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073],
'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711],
'do_convert_rgb': True,
}
__UpperCamelCase =os.path.join(self.tmpdirname , A_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(A_ , A_ )
def _a ( self , **A_ ) -> Optional[int]:
return BertTokenizer.from_pretrained(self.tmpdirname , **A_ )
def _a ( self , **A_ ) -> str:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **A_ )
def _a ( self , **A_ ) -> Any:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **A_ )
def _a ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> Dict:
__UpperCamelCase =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__UpperCamelCase =[Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =self.get_rust_tokenizer()
__UpperCamelCase =self.get_image_processor()
__UpperCamelCase =ChineseCLIPProcessor(tokenizer=A_ , image_processor=A_ )
processor_slow.save_pretrained(self.tmpdirname )
__UpperCamelCase =ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A_ )
__UpperCamelCase =ChineseCLIPProcessor(tokenizer=A_ , image_processor=A_ )
processor_fast.save_pretrained(self.tmpdirname )
__UpperCamelCase =ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A_ )
self.assertIsInstance(processor_fast.tokenizer , A_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A_ )
self.assertIsInstance(processor_fast.image_processor , A_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase =self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' )
__UpperCamelCase =self.get_image_processor(do_normalize=A_ )
__UpperCamelCase =ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=A_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A_ )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.get_image_processor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ChineseCLIPProcessor(tokenizer=A_ , image_processor=A_ )
__UpperCamelCase =self.prepare_image_inputs()
__UpperCamelCase =image_processor(A_ , return_tensors='np' )
__UpperCamelCase =processor(images=A_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ) -> str:
__UpperCamelCase =self.get_image_processor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ChineseCLIPProcessor(tokenizer=A_ , image_processor=A_ )
__UpperCamelCase ='Alexandra,T-shirt的价格是15便士。'
__UpperCamelCase =processor(text=A_ )
__UpperCamelCase =tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.get_image_processor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ChineseCLIPProcessor(tokenizer=A_ , image_processor=A_ )
__UpperCamelCase ='Alexandra,T-shirt的价格是15便士。'
__UpperCamelCase =self.prepare_image_inputs()
__UpperCamelCase =processor(text=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.get_image_processor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ChineseCLIPProcessor(tokenizer=A_ , image_processor=A_ )
__UpperCamelCase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCamelCase =processor.batch_decode(A_ )
__UpperCamelCase =tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.get_image_processor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ChineseCLIPProcessor(tokenizer=A_ , image_processor=A_ )
__UpperCamelCase ='Alexandra,T-shirt的价格是15便士。'
__UpperCamelCase =self.prepare_image_inputs()
__UpperCamelCase =processor(text=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
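# --- Illustrative sketch: the processor contract exercised above boils down to
# routing text to the tokenizer and images to the image processor, then merging
# the two output dicts. A minimal, hypothetical stand-in:
#
# class MiniProcessor:
#     def __init__(self, tokenizer, image_processor):
#         self.tokenizer = tokenizer
#         self.image_processor = image_processor
#
#     def __call__(self, text=None, images=None, **kwargs):
#         if text is None and images is None:
#             raise ValueError("You have to specify either text or images.")
#         encoding = dict(self.tokenizer(text, **kwargs)) if text is not None else {}
#         if images is not None:
#             encoding.update(self.image_processor(images, **kwargs))
#         return encoding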
| 713 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = RoCBertTokenizer
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : int = filter_non_english
def _a ( self ) -> Optional[Any]:
super().setUp()
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
__UpperCamelCase ={}
__UpperCamelCase ={}
for i, value in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =i
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(A_ , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Any:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__UpperCamelCase ={}
for i, token in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =RoCBertWordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _a ( self ) -> Dict:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _a ( self ) -> Tuple:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _a ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
__UpperCamelCase =self.get_rust_tokenizer()
self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _a ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
__UpperCamelCase =tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
__UpperCamelCase =tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False
__UpperCamelCase =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _a ( self ) -> List[str]:
__UpperCamelCase =['的', '人', '有']
__UpperCamelCase =''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =True
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =False
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCamelCase =[
f'##{token}' if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
@slow
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.encode('你好' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode('你是谁' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase ='你好,你是谁'
__UpperCamelCase =tokenizer.tokenize(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_shape_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_pronunciation_ids(A_ )
__UpperCamelCase =tokenizer.prepare_for_model(
A_ , A_ , A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode_plus(A_ , add_special_tokens=A_ )
self.assertEqual(A_ , A_ )
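# --- Illustrative sketch (hypothetical helper): RoCBert keeps three parallel
# vocabularies (token ids, glyph/shape ids, pronunciation ids), so each token
# is looked up three times, as the tests above exercise.
#
# def triple_encode(tokens, vocab, shape_vocab, pron_vocab, unk_id=0):
#     ids = [vocab.get(t, unk_id) for t in tokens]
#     shape_ids = [shape_vocab.get(t, unk_id) for t in tokens]
#     pron_ids = [pron_vocab.get(t, unk_id) for t in tokens]
#     return ids, shape_ids, pron_ids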
| 682 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : str = "naver-clova-ix/donut-base-finetuned-docvqa"
UpperCAmelCase__ : List[Any] = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
UpperCAmelCase__ : List[Any] = "document_qa"
UpperCAmelCase__ : Optional[int] = AutoProcessor
UpperCAmelCase__ : Optional[Any] = VisionEncoderDecoderModel
UpperCAmelCase__ : str = ["image", "text"]
UpperCAmelCase__ : int = ["text"]
def __init__( self , *A_ , **A_ ) -> int:
if not is_vision_available():
raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
super().__init__(*A_ , **A_ )
def _a ( self , A_ , A_ ) -> List[str]:
__UpperCamelCase ='<s_docvqa><s_question>{user_input}</s_question><s_answer>'
__UpperCamelCase =task_prompt.replace('{user_input}' , A_ )
__UpperCamelCase =self.pre_processor.tokenizer(
A_ , add_special_tokens=A_ , return_tensors='pt' ).input_ids
__UpperCamelCase =self.pre_processor(A_ , return_tensors='pt' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def _a ( self , A_ ) -> Tuple:
return self.model.generate(
inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=A_ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=A_ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=A_ , ).sequences
def _a ( self , A_ ) -> List[str]:
__UpperCamelCase =self.pre_processor.batch_decode(A_ )[0]
__UpperCamelCase =sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
__UpperCamelCase =sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
__UpperCamelCase =re.sub(r'<.*?>' , '' , A_ , count=1 ).strip() # remove first task start token
__UpperCamelCase =self.pre_processor.tokenajson(A_ )
return sequence["answer"]
| 714 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=10 , A_=160 , A_=8 , A_=0.0 , A_=4000 , A_=False , A_=True , ) -> Optional[Any]:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =min_seq_length
__UpperCamelCase =max_seq_length
__UpperCamelCase =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCamelCase =padding_value
__UpperCamelCase =sampling_rate
__UpperCamelCase =return_attention_mask
__UpperCamelCase =do_normalize
__UpperCamelCase =feature_size
__UpperCamelCase =chunk_length
__UpperCamelCase =hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _a ( self , A_=False , A_=False ) -> Any:
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
__UpperCamelCase =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__UpperCamelCase =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__UpperCamelCase =[np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = WhisperFeatureExtractor if is_speech_available() else None
def _a ( self ) -> Optional[int]:
__UpperCamelCase =WhisperFeatureExtractionTester(self )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
__UpperCamelCase =self.feature_extraction_class.from_pretrained(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =os.path.join(A_ , 'feat_extract.json' )
feat_extract_first.to_json_file(A_ )
__UpperCamelCase =self.feature_extraction_class.from_json_file(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> Tuple:
# Tests that all call wrap to encode_plus and batch_encode_plus
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
__UpperCamelCase =feature_extractor(A_ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__UpperCamelCase =feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test batched
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__UpperCamelCase =[floats_list((1, x) )[0] for x in (800, 800, 800)]
__UpperCamelCase =np.asarray(A_ )
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test truncation required
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
__UpperCamelCase =[x[: feature_extractor.n_samples] for x in speech_inputs]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs_truncated]
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
def _a ( self ) -> Dict:
import torch
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase =np.random.rand(100 , 32 ).astype(np.floataa )
__UpperCamelCase =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _a ( self , A_ ) -> Optional[int]:
__UpperCamelCase =load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
__UpperCamelCase =ds.sort('id' ).select(range(A_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def _a ( self ) -> Optional[int]:
# fmt: off
__UpperCamelCase =torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
__UpperCamelCase =self._load_datasamples(1 )
__UpperCamelCase =WhisperFeatureExtractor()
__UpperCamelCase =feature_extractor(A_ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1E-4 ) )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase =self._load_datasamples(1 )[0]
__UpperCamelCase =((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
__UpperCamelCase =feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0]
self.assertTrue(np.all(np.mean(A_ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1E-3 ) )
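# --- Illustrative sketch: the normalization checked in the last test is plain
# per-utterance standardization. A minimal NumPy version (assumption: a single
# unpadded utterance, no attention mask):
#
# def zero_mean_unit_var(x, eps=1e-7):
#     return (x - x.mean()) / np.sqrt(x.var() + eps)
#
# After normalization the mean is ~0 and the variance ~1, which is exactly what
# the assertions above verify to within 1e-3.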
| 682 | 0 |
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    # Standard BFS over the residual graph: returns True if the sink `t` is
    # reachable from the source `s`, filling `parent` with the path found.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # keep the original capacities to spot saturated edges later
    while bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink
        while s != source:
            # find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    # edges that were fully saturated by the max flow form the minimum cut
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
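# Worked note: BFS-based Ford-Fulkerson (Edmonds-Karp) saturates augmenting
# paths until none remain; `mincut` then reports every edge with zero residual
# but positive original capacity. For `test_graph` the reported saturated edges
# always include the min-cut edges (1, 3), (4, 3) and (4, 5), whose original
# capacities 12 + 7 + 4 = 23 equal the maximum flow, as max-flow/min-cut
# duality requires.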
| 715 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
"""simple docstring"""
def __init__( self , A_ , ) -> List[str]:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =2
__UpperCamelCase =99
__UpperCamelCase =0
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase ='last'
__UpperCamelCase =True
__UpperCamelCase =None
__UpperCamelCase =0
def _a ( self ) -> List[Any]:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase =None
if self.use_input_lengths:
__UpperCamelCase =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Any:
__UpperCamelCase =TFFlaubertModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertWithLMHeadModel(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertForQuestionAnsweringSimple(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =TFFlaubertForSequenceClassification(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFFlaubertForTokenClassification(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFFlaubertForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
__UpperCamelCase ={
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCAmelCase__ : Any = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[int] = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _a ( self ) -> Dict:
__UpperCamelCase =TFFlaubertModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , emb_dim=37 )
def _a ( self ) -> Dict:
self.config_tester.run_common_tests()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A_ )
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A_ )
@slow
def _a ( self ) -> Optional[int]:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =TFFlaubertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> int:
__UpperCamelCase =TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase =tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__UpperCamelCase =model(A_ )[0]
__UpperCamelCase =tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice.
__UpperCamelCase =tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
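# Note: the slice-comparison pattern above (checking output[:, :3, :3] against
# a hard-coded tensor with atol=1e-4) is the usual way integration tests pin a
# checkpoint's numerics without storing the full output tensor.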
| 682 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/longformer-base-4096': 4096,
'allenai/longformer-large-4096': 4096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
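# Note: `bytes_to_unicode` maps every byte value (0-255) to a printable unicode
# character: bytes that are already printable map to themselves, the remaining
# ones are shifted into the 256+ range. Byte-level BPE can then treat arbitrary
# bytes as ordinary characters, so no input ever falls out of the vocabulary.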
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = VOCAB_FILES_NAMES
UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , A_ , A_ , A_="replace" , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=False , **A_ , ) -> Tuple:
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , )
with open(A_ , encoding='utf-8' ) as vocab_handle:
__UpperCamelCase =json.load(A_ )
__UpperCamelCase ={v: k for k, v in self.encoder.items()}
__UpperCamelCase =errors # how to handle errors in decoding
__UpperCamelCase =bytes_to_unicode()
__UpperCamelCase ={v: k for k, v in self.byte_encoder.items()}
with open(A_ , encoding='utf-8' ) as merges_handle:
__UpperCamelCase =merges_handle.read().split('\n' )[1:-1]
__UpperCamelCase =[tuple(merge.split() ) for merge in bpe_merges]
__UpperCamelCase =dict(zip(A_ , range(len(A_ ) ) ) )
__UpperCamelCase ={}
__UpperCamelCase =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCamelCase =re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def _a ( self ) -> List[str]:
return len(self.encoder )
def _a ( self ) -> Optional[Any]:
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self, token):
if token in self.cache:
return self.cache[token]
__UpperCamelCase =tuple(A_ )
__UpperCamelCase =get_pairs(A_ )
if not pairs:
return token
while True:
__UpperCamelCase =min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCamelCase , __UpperCamelCase =bigram
__UpperCamelCase =[]
__UpperCamelCase =0
while i < len(A_ ):
try:
__UpperCamelCase =word.index(A_ , A_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCamelCase =j
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCamelCase =tuple(A_ )
__UpperCamelCase =new_word
if len(A_ ) == 1:
break
else:
__UpperCamelCase =get_pairs(A_ )
__UpperCamelCase =' '.join(A_ )
__UpperCamelCase =word
return word
def _a ( self , A_ ) -> int:
__UpperCamelCase =[]
for token in re.findall(self.pat , A_ ):
__UpperCamelCase =''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(' ' ) )
return bpe_tokens
def _a ( self , A_ ) -> Union[str, Any]:
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def _a ( self , A_ ) -> Any:
return self.decoder.get(A_ )
def _a ( self , A_ ) -> Optional[int]:
__UpperCamelCase =''.join(A_ )
__UpperCamelCase =bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase =os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' )
__UpperCamelCase =0
with open(A_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_ : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!' )
__UpperCamelCase =token_index
writer.write(' '.join(A_ ) + '\n' )
index += 1
return vocab_file, merge_file
def _a ( self , A_ , A_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCamelCase =[self.cls_token_id]
__UpperCamelCase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def _a ( self , A_ , A_ = None ) -> List[int]:
__UpperCamelCase =[self.sep_token_id]
__UpperCamelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self , A_ , A_=False , **A_ ) -> Optional[int]:
__UpperCamelCase =kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()):
__UpperCamelCase =' ' + text
return (text, kwargs)
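# Worked example (my addition): a standalone sketch of the greedy merge loop in
# `bpe` above. `toy_ranks` is an illustrative merges table, not a real one.
def _bpe_demo(token , toy_ranks ):
    word = tuple(token )
    while len(word ) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word ) - 1 )}
        ranked = [pair for pair in pairs if pair in toy_ranks]
        if not ranked:
            break
        first , second = min(ranked , key=toy_ranks.get )  # lowest rank merges first
        merged , i = [] , 0
        while i < len(word ):
            if i < len(word ) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second )
                i += 2
            else:
                merged.append(word[i] )
                i += 1
        word = tuple(merged )
    return ' '.join(word )

assert _bpe_demo('low' , {('l', 'o'): 0, ('lo', 'w'): 1} ) == 'low'  # ('l','o') merges, then ('lo','w')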
| 716 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def test_send_file_running_as_expected( file , sock ):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _ : next(f )
    # ===== invoke =====
    send_file(filename='mytext.txt' , testing=True )
    # ===== verification =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
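# Note (my addition): stacked @patch decorators are applied bottom-up, so the
# mock for 'builtins.open' arrives as the first test argument (file) and the one
# for 'socket.socket' as the second (sock). The iter([1, None]) side effect
# makes the mocked read() return data exactly once before signalling end-of-file.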
| 682 | 0 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv('SM_HP_MP_PARAMETERS' , '{}' )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options )
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv('SM_FRAMEWORK_PARAMS' , '{}' )
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options )
        if not mpi_options.get('sagemaker_mpi_enabled' , False ):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec('smdistributed' ) is not None
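# Quick sketch (my addition): the probe above returns True only when SageMaker
# injects valid JSON with a "partitions" field, MPI is enabled, and the
# `smdistributed` package is importable. Left commented since it mutates env:
# os.environ['SM_HP_MP_PARAMETERS'] = '{"partitions": 2}'
# os.environ['SM_FRAMEWORK_PARAMS'] = '{"sagemaker_mpi_enabled": true}'
# print(is_sagemaker_model_parallel_available())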
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments( TrainingArguments ):
    """simple docstring"""
    mp_parameters: str = field(
        default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , )
    def __post_init__( self ) -> None:
        super().__post_init__()
        warnings.warn(
            '`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
            '`TrainingArguments` instead.' , FutureWarning , )
    @cached_property
    def _setup_devices( self ) -> "torch.device":
        logger.info('PyTorch: setting up devices' )
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                'torch.distributed process group is initialized, but local_rank == -1. '
                'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch' )
        if self.no_cuda:
            device = torch.device('cpu' )
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device('cuda' , local_rank )
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401
            torch.distributed.init_process_group(backend='smddp' , timeout=self.ddp_timeout_delta )
            self.local_rank = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK' ) )
            device = torch.device('cuda' , self.local_rank )
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend='nccl' , timeout=self.ddp_timeout_delta )
            device = torch.device('cuda' , self.local_rank )
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device )
        return device
    @property
    def world_size( self ) -> int:
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size
    @property
    def place_model_on_device( self ) -> bool:
        return not is_sagemaker_model_parallel_available()
    @property
    def _no_sync_in_gradient_accumulation( self ) -> bool:
        return False
| 717 |
import math
from collections.abc import Callable
def intersection( function: Callable[[float], float] , x0: float , x1: float ) -> float:
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1 ) == function(x_n ):
            raise ZeroDivisionError('float division by zero, could not find root' )
        x_n2 = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f( x: float ) -> float:
    return math.pow(x , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
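    # Sanity check (my addition): the real root of x**3 - 2*x - 5 is approximately
    # 2.0945515, so the value printed above should agree to within 1e-5.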
| 682 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class XLMRobertaConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "xlm-roberta"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig( OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
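# Usage sketch (my addition; class names follow the repaired declarations above,
# and the OnnxConfig constructor signature is an assumption, so left commented):
# onnx_config = XLMRobertaOnnxConfig(XLMRobertaConfig())
# print(onnx_config.inputs)  # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...])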
| 718 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    """simple docstring"""
    def __init__( self ) -> None:
        self.initialized = False
    def create_rag_retriever( self , config , question_encoder_tokenizer , generator_tokenizer , index ) -> None:
        if not self.initialized:
            self.retriever = RagRetriever(
                config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
            self.initialized = True
    def init_retrieval( self ) -> None:
        self.retriever.index.init_index()
    def retrieve( self , question_hidden_states , n_docs ):
        doc_ids , retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states , n_docs )
        return doc_ids, retrieved_doc_embeds
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ , A_=None ) -> Dict:
if index is not None and index.is_initialized() and len(A_ ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(A_ , A_ , A_ , A_ )
for worker in self.retrieval_workers
] )
def _a ( self ) -> Union[str, Any]:
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _a ( self , A_ , A_ ) -> Optional[int]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__UpperCamelCase =self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__UpperCamelCase , __UpperCamelCase =ray.get(random_worker.retrieve.remote(A_ , A_ ) )
else:
__UpperCamelCase , __UpperCamelCase =self._main_retrieve(A_ , A_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A_ )
@classmethod
def _a ( cls , A_ , A_=None , **A_ ) -> List[str]:
return super(A_ , cls ).get_tokenizers(A_ , A_ , **A_ )
@classmethod
def _a ( cls , A_ , A_ , A_=None , **A_ ) -> str:
__UpperCamelCase =kwargs.pop('config' , A_ ) or RagConfig.from_pretrained(A_ , **A_ )
__UpperCamelCase =RagTokenizer.from_pretrained(A_ , config=A_ )
__UpperCamelCase =rag_tokenizer.question_encoder
__UpperCamelCase =rag_tokenizer.generator
if indexed_dataset is not None:
__UpperCamelCase ='custom'
__UpperCamelCase =CustomHFIndex(config.retrieval_vector_size , A_ )
else:
__UpperCamelCase =cls._build_index(A_ )
return cls(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , retrieval_workers=A_ , index=A_ , )
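# Sketch (my addition) of the load-balancing step above, independent of ray:
# each retrieve() call picks one retrieval actor uniformly at random, which
# spreads concurrent queries across the worker pool.
# random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]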
| 682 | 0 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137
def haversine_distance( lat1: float , lon1: float , lat2: float , lon2: float ) -> float:
    # Equation parameters: flattening of the WGS84 reference ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    phi_2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    lambda_1 = radians(lon1 )
    lambda_2 = radians(lon2 )
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2 )
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2 )
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2 ) * cos(phi_1 ) * sin_sq_lambda) )
    return 2 * RADIUS * asin(h_value )
if __name__ == "__main__":
import doctest
doctest.testmod()
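    # Example (my addition; coordinates approximate San Francisco and Yosemite
    # Valley, so the result should land in the 200-300 km range):
    _d = haversine_distance(37.774856 , -122.424227 , 37.864742 , -119.537521 )
    assert 200_000 < _d < 300_000  # metres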
| 719 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=64 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=[1, 16, 4, 4] , A_=None , ) -> Any:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =is_training
__UpperCamelCase =use_labels
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =scope
__UpperCamelCase =backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
__UpperCamelCase =(self.image_size // 32) ** 2
__UpperCamelCase =num_patches + 1
def _a ( self ) -> str:
__UpperCamelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =self.get_config()
return config, pixel_values, labels
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ={
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=A_ , )
def _a ( self , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModel(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =self.type_sequence_label_size
__UpperCamelCase =ViTHybridForImageClassification(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =config_and_inputs
__UpperCamelCase ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def _a ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self ) -> List[str]:
pass
def _a ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def _a ( self ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase =[*signature.parameters.keys()]
__UpperCamelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =_config_zero_init(A_ )
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(config=A_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
__UpperCamelCase =[f'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def _a ( self ) -> int:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =ViTHybridModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _UpperCAmelCase ( ):
__UpperCamelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ) -> Union[str, Any]:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self ) -> str:
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
A_ )
__UpperCamelCase =self.default_image_processor
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
__UpperCamelCase =model(**A_ )
# verify the logits
__UpperCamelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
__UpperCamelCase =torch.tensor([-1.9090, -0.4993, -0.2389] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self ) -> Optional[int]:
__UpperCamelCase =ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' )
__UpperCamelCase =model(**A_ )
__UpperCamelCase =outputs.logits
# model predicts one of the 1000 ImageNet classes
__UpperCamelCase =logits.argmax(-1 ).item()
        self.assertEqual(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
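# Arithmetic check (my addition): with image_size=64 and the backbone's output
# stride of 32, the feature map is 2x2, so the transformer sequence length is
# (64 // 32) ** 2 + 1 = 5 once the [CLS] token is added.
assert (64 // 32) ** 2 + 1 == 5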
| 682 | 0 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = ConsistencyModelPipeline
UpperCAmelCase__ : List[Any] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
UpperCAmelCase__ : List[str] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
UpperCAmelCase__ : Optional[Any] = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
@property
def _a ( self ) -> int:
__UpperCamelCase =UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet' , )
return unet
@property
def _a ( self ) -> Optional[int]:
__UpperCamelCase =UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
return unet
def _a ( self , A_=False ) -> Optional[int]:
if class_cond:
__UpperCamelCase =self.dummy_cond_unet
else:
__UpperCamelCase =self.dummy_uncond_unet
# Default to CM multistep sampler
__UpperCamelCase =CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCamelCase ={
'unet': unet,
'scheduler': scheduler,
}
return components
def _a ( self , A_ , A_=0 ) -> int:
if str(A_ ).startswith('mps' ):
__UpperCamelCase =torch.manual_seed(A_ )
else:
__UpperCamelCase =torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase ={
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [22, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
def _a ( self ) -> Optional[int]:
__UpperCamelCase ='cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase =self.get_dummy_components()
__UpperCamelCase =ConsistencyModelPipeline(**A_ )
__UpperCamelCase =pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase =self.get_dummy_inputs(A_ )
__UpperCamelCase =pipe(**A_ ).images
assert image.shape == (1, 32, 32, 3)
__UpperCamelCase =image[0, -3:, -3:, -1]
__UpperCamelCase =np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ='cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase =self.get_dummy_components(class_cond=A_ )
__UpperCamelCase =ConsistencyModelPipeline(**A_ )
__UpperCamelCase =pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase =self.get_dummy_inputs(A_ )
__UpperCamelCase =0
__UpperCamelCase =pipe(**A_ ).images
assert image.shape == (1, 32, 32, 3)
__UpperCamelCase =image[0, -3:, -3:, -1]
__UpperCamelCase =np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _a ( self ) -> Optional[int]:
__UpperCamelCase ='cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase =self.get_dummy_components()
__UpperCamelCase =ConsistencyModelPipeline(**A_ )
__UpperCamelCase =pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase =self.get_dummy_inputs(A_ )
__UpperCamelCase =1
__UpperCamelCase =None
__UpperCamelCase =pipe(**A_ ).images
assert image.shape == (1, 32, 32, 3)
__UpperCamelCase =image[0, -3:, -3:, -1]
__UpperCamelCase =np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _a ( self ) -> List[Any]:
__UpperCamelCase ='cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase =self.get_dummy_components(class_cond=A_ )
__UpperCamelCase =ConsistencyModelPipeline(**A_ )
__UpperCamelCase =pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase =self.get_dummy_inputs(A_ )
__UpperCamelCase =1
__UpperCamelCase =None
__UpperCamelCase =0
__UpperCamelCase =pipe(**A_ ).images
assert image.shape == (1, 32, 32, 3)
__UpperCamelCase =image[0, -3:, -3:, -1]
__UpperCamelCase =np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self , A_=0 , A_=False , A_="cpu" , A_=torch.floataa , A_=(1, 3, 64, 64) ) -> Any:
__UpperCamelCase =torch.manual_seed(A_ )
__UpperCamelCase ={
'num_inference_steps': None,
'timesteps': [22, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
__UpperCamelCase =self.get_fixed_latents(seed=A_ , device=A_ , dtype=A_ , shape=A_ )
__UpperCamelCase =latents
return inputs
def _a ( self , A_=0 , A_="cpu" , A_=torch.floataa , A_=(1, 3, 64, 64) ) -> List[str]:
if type(A_ ) == str:
__UpperCamelCase =torch.device(A_ )
__UpperCamelCase =torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase =randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ )
return latents
def _a ( self ) -> int:
__UpperCamelCase =UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__UpperCamelCase =CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCamelCase =ConsistencyModelPipeline(unet=A_ , scheduler=A_ )
pipe.to(torch_device=A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase =self.get_inputs()
__UpperCamelCase =pipe(**A_ ).images
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase =image[0, -3:, -3:, -1]
__UpperCamelCase =np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__UpperCamelCase =CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCamelCase =ConsistencyModelPipeline(unet=A_ , scheduler=A_ )
pipe.to(torch_device=A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase =self.get_inputs()
__UpperCamelCase =1
__UpperCamelCase =None
__UpperCamelCase =pipe(**A_ ).images
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase =image[0, -3:, -3:, -1]
__UpperCamelCase =np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
@require_torch_a
def _a ( self ) -> Tuple:
__UpperCamelCase =UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__UpperCamelCase =CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCamelCase =ConsistencyModelPipeline(unet=A_ , scheduler=A_ )
pipe.to(torch_device=A_ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase =self.get_inputs(get_fixed_latents=A_ , device=A_ )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=A_ , enable_math=A_ , enable_mem_efficient=A_ ):
__UpperCamelCase =pipe(**A_ ).images
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase =image[0, -3:, -3:, -1]
__UpperCamelCase =np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@require_torch_a
def _a ( self ) -> Any:
__UpperCamelCase =UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__UpperCamelCase =CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCamelCase =ConsistencyModelPipeline(unet=A_ , scheduler=A_ )
pipe.to(torch_device=A_ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase =self.get_inputs(get_fixed_latents=A_ , device=A_ )
__UpperCamelCase =1
__UpperCamelCase =None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=A_ , enable_math=A_ , enable_mem_efficient=A_ ):
__UpperCamelCase =pipe(**A_ ).images
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase =image[0, -3:, -3:, -1]
__UpperCamelCase =np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 720 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : LevitConfig , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : bool = True ):
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
__UpperCamelCase =timm.create_model('levit_128s' , pretrained=SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =timm.create_model('levit_128' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 1_92:
__UpperCamelCase =timm.create_model('levit_192' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 2_56:
__UpperCamelCase =timm.create_model('levit_256' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 3_84:
__UpperCamelCase =timm.create_model('levit_384' , pretrained=SCREAMING_SNAKE_CASE__ )
from_model.eval()
__UpperCamelCase =LevitForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE__ ).eval()
__UpperCamelCase =OrderedDict()
__UpperCamelCase =from_model.state_dict()
__UpperCamelCase =list(from_model.state_dict().keys() )
__UpperCamelCase =list(our_model.state_dict().keys() )
print(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__UpperCamelCase =weights[og_keys[i]]
our_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =torch.randn((2, 3, 2_24, 2_24) )
__UpperCamelCase =from_model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =our_model(SCREAMING_SNAKE_CASE__ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "The model logits don't match the original one."
__UpperCamelCase =name
print(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__UpperCamelCase =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = True ):
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =10_00
__UpperCamelCase =(1, num_labels)
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =num_labels
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
__UpperCamelCase =partial(SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase ={
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
__UpperCamelCase ={
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , names_to_config[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
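    # Example invocation (my addition; the model name must be one of the keys in
    # names_to_hidden_sizes defined above, and the script path is illustrative):
    #   python <this_script>.py --model_name levit-128S --pytorch_dump_folder_path levit-dump-folder/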
| 682 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def _a ( self ) -> Any:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__UpperCamelCase =AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
__UpperCamelCase =TFAutoModel.from_pretrained(A_ , from_pt=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
__UpperCamelCase =AutoModel.from_pretrained(A_ , from_tf=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def _a ( self ) -> Union[str, Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__UpperCamelCase =AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
__UpperCamelCase =TFAutoModelForPreTraining.from_pretrained(A_ , from_pt=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
__UpperCamelCase =AutoModelForPreTraining.from_pretrained(A_ , from_tf=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def _a ( self ) -> Tuple:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
__UpperCamelCase =TFAutoModelForCausalLM.from_pretrained(A_ , from_pt=A_ )
__UpperCamelCase , __UpperCamelCase =TFAutoModelForCausalLM.from_pretrained(
A_ , output_loading_info=A_ , from_pt=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
__UpperCamelCase =AutoModelForCausalLM.from_pretrained(A_ , from_tf=A_ )
__UpperCamelCase , __UpperCamelCase =AutoModelForCausalLM.from_pretrained(
A_ , output_loading_info=A_ , from_tf=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def _a ( self ) -> Any:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
__UpperCamelCase =TFAutoModelWithLMHead.from_pretrained(A_ , from_pt=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
__UpperCamelCase =AutoModelWithLMHead.from_pretrained(A_ , from_tf=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def _a ( self ) -> List[Any]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
__UpperCamelCase =TFAutoModelForMaskedLM.from_pretrained(A_ , from_pt=A_ )
__UpperCamelCase , __UpperCamelCase =TFAutoModelForMaskedLM.from_pretrained(
A_ , output_loading_info=A_ , from_pt=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
__UpperCamelCase =AutoModelForMaskedLM.from_pretrained(A_ , from_tf=A_ )
__UpperCamelCase , __UpperCamelCase =AutoModelForMaskedLM.from_pretrained(
A_ , output_loading_info=A_ , from_tf=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def _a ( self ) -> List[str]:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
__UpperCamelCase =TFAutoModelForSeqaSeqLM.from_pretrained(A_ , from_pt=A_ )
__UpperCamelCase , __UpperCamelCase =TFAutoModelForSeqaSeqLM.from_pretrained(
A_ , output_loading_info=A_ , from_pt=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
__UpperCamelCase =AutoModelForSeqaSeqLM.from_pretrained(A_ , from_tf=A_ )
__UpperCamelCase , __UpperCamelCase =AutoModelForSeqaSeqLM.from_pretrained(
A_ , output_loading_info=A_ , from_tf=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def _a ( self ) -> str:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__UpperCamelCase =AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
__UpperCamelCase =TFAutoModelForSequenceClassification.from_pretrained(A_ , from_pt=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
__UpperCamelCase =AutoModelForSequenceClassification.from_pretrained(A_ , from_tf=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
@slow
def _a ( self ) -> Tuple:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__UpperCamelCase =AutoConfig.from_pretrained(A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
__UpperCamelCase =TFAutoModelForQuestionAnswering.from_pretrained(A_ , from_pt=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
__UpperCamelCase =AutoModelForQuestionAnswering.from_pretrained(A_ , from_tf=A_ )
self.assertIsNotNone(A_ )
self.assertIsInstance(A_ , A_ )
    def test_from_pretrained_identifier( self ) -> None:
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , BertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
    def test_from_identifier_from_model_type( self ) -> None:
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , RobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14410 )
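# Minimal sketch (my addition) of the cross-framework round trip exercised
# above; both calls hit the Hub, so they are left commented:
# tf_model = TFAutoModel.from_pretrained('bert-base-uncased', from_pt=True)  # TF model from PT weights
# pt_model = AutoModel.from_pretrained('bert-base-uncased', from_tf=True)    # PT model from TF weights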
| 721 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ) -> None:
        self.checkpoint = 'laion/clap-htsat-unfused'
        self.tmpdirname = tempfile.mkdtemp()
    def get_tokenizer( self , **kwargs ):
        return RobertaTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor( self , **kwargs ):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ) -> None:
        shutil.rmtree(self.tmpdirname )
def _a ( self ) -> str:
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase =ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
def _a ( self ) -> int:
__UpperCamelCase =ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__UpperCamelCase =self.get_feature_extractor(do_normalize=A_ , padding_value=1.0 )
__UpperCamelCase =ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase =floats_list((3, 1000) )
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' )
__UpperCamelCase =processor(audios=A_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ) -> int:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase ='This is a test string'
__UpperCamelCase =processor(text=A_ )
__UpperCamelCase =tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCamelCase =processor.batch_decode(A_ )
__UpperCamelCase =tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
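# Usage sketch (my addition; downloads the checkpoint, so left commented):
# processor = ClapProcessor.from_pretrained('laion/clap-htsat-unfused')
# inputs = processor(text='a dog barking', audios=floats_list((1, 1000)), return_tensors='np')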
| 682 | 0 |
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = '__DUMMY_TRANSFORMERS_USER__'
CI_HUB_USER_FULL_NAME = 'Dummy User'
CI_HUB_USER_TOKEN = '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'
CI_HUB_ENDPOINT = 'https://hub-ci.huggingface.co'
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'
CI_HUB_TOKEN_PATH = Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple ):
monkeypatch.setattr(
'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] ):
monkeypatch.setattr('datasets.config.HF_ENDPOINT' , SCREAMING_SNAKE_CASE__ )
monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
HfFolder.save_token(SCREAMING_SNAKE_CASE__ )
yield
HfFolder.delete_token()
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( ):
return HfApi(endpoint=SCREAMING_SNAKE_CASE__ )
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : HfApi ):
__UpperCamelCase =HfFolder.get_token()
HfFolder.save_token(SCREAMING_SNAKE_CASE__ )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple ):
def _cleanup_repo(SCREAMING_SNAKE_CASE__ : Dict ):
hf_api.delete_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type='dataset' )
return _cleanup_repo
@pytest.fixture
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
@contextmanager
def _temporary_repo(SCREAMING_SNAKE_CASE__ : List[Any] ):
try:
yield repo_id
finally:
cleanup_repo(SCREAMING_SNAKE_CASE__ )
return _temporary_repo
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : HfApi , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
__UpperCamelCase =F'repo_txt_data-{int(time.time() * 10E3 )}'
__UpperCamelCase =F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type='dataset' , private=SCREAMING_SNAKE_CASE__ )
hf_api.upload_file(
token=SCREAMING_SNAKE_CASE__ , path_or_fileobj=str(SCREAMING_SNAKE_CASE__ ) , path_in_repo='data/text_data.txt' , repo_id=SCREAMING_SNAKE_CASE__ , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict ):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : HfApi , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =F'repo_zipped_txt_data-{int(time.time() * 10E3 )}'
__UpperCamelCase =F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type='dataset' , private=SCREAMING_SNAKE_CASE__ )
hf_api.upload_file(
token=SCREAMING_SNAKE_CASE__ , path_or_fileobj=str(SCREAMING_SNAKE_CASE__ ) , path_in_repo='data.zip' , repo_id=SCREAMING_SNAKE_CASE__ , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int ):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : HfApi , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
__UpperCamelCase =F'repo_zipped_img_data-{int(time.time() * 10E3 )}'
__UpperCamelCase =F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type='dataset' , private=SCREAMING_SNAKE_CASE__ )
hf_api.upload_file(
token=SCREAMING_SNAKE_CASE__ , path_or_fileobj=str(SCREAMING_SNAKE_CASE__ ) , path_in_repo='data.zip' , repo_id=SCREAMING_SNAKE_CASE__ , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple ):
return hf_private_dataset_repo_zipped_img_data_
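# Usage sketch (my addition): pytest injects these fixtures by parameter name.
# The fixture names below are illustrative, since the mangled definitions above
# lost the originals:
# def test_private_txt(hf_private_dataset_repo_txt_data, set_ci_hub_access_token):
#     assert hf_private_dataset_repo_txt_data.startswith(CI_HUB_USER)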
| 700 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('test' )
    else:
        parser = argparse.ArgumentParser('Accelerate test command' )
    parser.add_argument(
        '--config_file' , default=None , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser
def test_command(args ):
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F'--config_file={args.config_file} {script_name}'
    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print('Test is a success! You are ready for your distributed training!' )
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
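    # Example (my addition): running `accelerate test --config_file my_config.yaml`
    # assembles roughly the following subprocess command (path is illustrative):
    #   accelerate-launch --config_file=my_config.yaml <...>/test_utils/scripts/test_script.py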
| 682 | 0 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
import importlib
import os


def get_module_path(test_file):
    """Return the dotted module path for a model test file like `tests/models/bert/test_modeling_bert.py`."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
            f'{test_file} instead.')
    test_fn = components[-1]
    if not test_fn.endswith('py'):
        raise ValueError(f'`test_file` should be a python file. Got {test_fn} instead.')
    if not test_fn.startswith('test_modeling_'):
        raise ValueError(
            f'`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.')
    components = components[:-1] + [test_fn.replace('.py', '')]
    test_module_path = '.'.join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the module corresponding to a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Collect all `*ModelTester` classes defined in a model test file."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith('ModelTester'):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Collect all test classes (i.e. classes with a non-empty `all_model_classes`) in a model test file."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, 'all_model_classes', [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Collect all model classes covered by the test classes in a model test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Return the model tester class attached to a test class, if any."""
    test = test_class()
    if hasattr(test, 'setUp'):
        test.setUp()
    model_tester = None
    if hasattr(test, 'model_tester'):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Collect the test classes in a model test file that cover `model_class`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Collect the model tester classes in a model test file that cover `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in a model test file to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class in a model test file to the test classes covering it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class in a model test file to the model tester classes covering it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the collected information easy to print: class objects become their names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
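# Illustrative usage sketch (assumes it is run from the root of a `transformers`
# checkout so that `tests/models/...` is importable; the concrete file path below
# is only an example):
#
#   test_file = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
#   print(to_json(get_model_to_tester_mapping(test_file)))
#
# which prints a mapping like {'BertModel': ['BertModelTester'], ...}.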
| 701 |
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        'token_embedder': 'embeddings',
        'encoder_norm': 'layernorm',
        'kernel': 'weight',
        '.out': '.output',
        'scale': 'weight',
        'embedders_0.pos_embedding': 'row_embedder.weight',
        'embedders_1.pos_embedding': 'column_embedder.weight',
    }

    DECODER_CONVERSION_MAPPING = {
        'query': 'attention.query',
        'key': 'attention.key',
        'value': 'attention.value',
        'output.dense': 'output',
        'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
        'pre_self_attention_layer_norm': 'self_attention.layer_norm',
        'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
        'mlp.': 'mlp.DenseReluDense.',
        'pre_mlp_layer_norm': 'mlp.layer_norm',
        'self_attention.o': 'self_attention.attention.o',
        'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
        'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
        'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.logits_dense.weight': 'decoder.lm_head.weight',
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = '.'.join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)', r'layer.\1', new_key)
                new_key = new_key.replace('encoder', 'encoder.encoder')
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)', r'layer.\1', new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
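# Quick sanity check of the layer renaming above (illustrative key, runs standalone):
#
#   >>> re.sub(r'layers_(\d+)', r'layer.\1', 'encoder.layers_3.mlp.wi.weight')
#   'encoder.layer.3.mlp.wi.weight'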
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=15_36, d_ff=39_68, num_attention_heads=24, num_hidden_layers=18)
        decoder_config = Pix2StructTextConfig(hidden_size=15_36, d_ff=39_68, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa)

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer')
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 40_96
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print('Model saved in {}'.format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--use_large', action='store_true', help='Use large model.')
    parser.add_argument('--is_vqa', action='store_true', help='Convert a checkpoint fine-tuned for VQA.')
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa
    )
| 682 | 0 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce the amount of console output from TF
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trocr'] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
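# With the lazy module in place, submodules are only imported on first attribute
# access, e.g. (illustrative):
#
#   from transformers.models.trocr import TrOCRProcessor   # triggers the real import here
#
# Type checkers still see the eager imports via the TYPE_CHECKING branch above.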
| 682 | 0 |
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class TapasConfig(PretrainedConfig):
    """Configuration for TAPAS models: BERT hyperparameters plus table-QA fine-tuning options."""

    model_type = 'tapas'

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0,
        positive_label_weight=10.0, num_aggregation_labels=0, aggregation_loss_weight=1.0,
        use_answer_as_supervision=None, answer_loss_importance=1.0, use_normalized_answer_loss=False,
        huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0, use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False, average_approximation_function="ratio", cell_selection_preference=None,
        answer_loss_cutoff=None, max_num_rows=64, max_num_columns=32, average_logits_per_cell=False,
        select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True, disable_per_token_loss=False, aggregation_labels=None,
        no_aggregation_label_index=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
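# Minimal usage sketch (hedged: the specific values below are the usual WTQ-style
# fine-tuning settings, shown only as an example):
#
#   config = TapasConfig(num_aggregation_labels=4, answer_loss_cutoff=0.664)
#   config.aggregation_labels = {0: 'NONE', 1: 'SUM', 2: 'AVERAGE', 3: 'COUNT'}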
| 703 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerModel(config=config)

        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        prediction_scores = model(inputs)['logits']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size])

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base')
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]])

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer)

        expected_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ])
        expected_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ])

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key_layer, atol=self.tolerance)
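# Hedged sketch of the rotation that `apply_rotary_position_embeddings` performs
# on each even/odd pair of head dimensions, in plain NumPy (the names below are
# our own, not the library's API):
#
#   import numpy as np
#   def rotate(q, sin, cos):
#       q1, q2 = q[..., 0::2], q[..., 1::2]
#       out = np.empty_like(q)
#       out[..., 0::2] = q1 * cos - q2 * sin  # pair rotated by the position angle
#       out[..., 1::2] = q2 * cos + q1 * sin
#       return out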
| 682 | 0 |
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Returns a list of all left and right truncations of a number."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Quick pre-filter: for numbers longer than three digits, the first and last
    three digits must themselves be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Returns the first `count` primes that stay prime under every left and right truncation."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Returns the sum of the first eleven truncatable primes (Project Euler 37)."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"""{sum(compute_truncated_primes(11)) = }""")
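# Illustrative check: the first four truncatable primes are 23, 37, 53 and 73,
# and the sum of all eleven is 748317 (the Project Euler 37 answer).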
| 704 |
from __future__ import annotations

import math
import random
from collections.abc import Collection
from typing import overload


class Vector:
    """A vector backed by a list of components."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception('must have the same size')

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception('must have the same size')

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception('invalid operand!')

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception('index out of range')

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception('Vector is empty')
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    """Returns a zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Returns a unit basis vector with a 1 at index `pos`."""
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Computes the classic BLAS axpy operation: scalar * x + y."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Returns a vector of size n with random integer components between a and b."""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """A matrix with a height, a width and a list of rows."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ''
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception('matrix must have the same dimension!')

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception('matrices must have the same dimension!')

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    'vector must have the same size as the '
                    'number of columns of the matrix!')
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception('component: indices out of bounds')

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception('change_component: indices out of bounds')

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception('Indices out of bounds')

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if self.__height < 1:
            raise Exception('Matrix has no element')
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    """Returns an n x n matrix filled with zeros."""
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Returns a matrix with random integer components between a and b."""
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
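# Illustrative usage of the classes above:
#
#   x = Vector([1, 2, 3])
#   y = Vector([1, 1, 1])
#   print(x + y)                 # (2,3,4)
#   print(x * y)                 # 6 (dot product)
#   m = Matrix([[1, 0], [0, 1]], 2, 2)
#   print(m * Vector([3, 4]))    # (3,4)
#   print(m.determinant())       # 1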
| 682 | 0 |
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Top-down, memoized edit (Levenshtein) distance between two words."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
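# Worked example: min_distance_up_bottom('kitten', 'sitting') == 3
# (substitute k->s, substitute e->i, insert g).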
| 705 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    """Construct an M2M100 tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", pad_token="<pad>", unk_token="<unk>", language_codes="m2m100", sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get('additional_special_tokens', [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else 'en'
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the decoder."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f'{save_directory} should be a directory')
        vocab_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "en", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro", **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source language setting."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, 'r') as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, 'w') as f:
        json.dump(data, f, indent=2)
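# Hedged usage sketch (real checkpoint name; the generate call is commented
# because it needs a model instance):
#
#   tokenizer = M2M100Tokenizer.from_pretrained('facebook/m2m100_418M', src_lang='en')
#   batch = tokenizer('Hello world', return_tensors='pt')
#   # force the target language as the first generated token:
#   # model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id('fr'))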
| 682 | 0 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    """Mobius function of `number`: 1 for a square-free number with an even count
    of prime factors, -1 for an odd count, and 0 otherwise."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
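# Examples: mobius(24) == 0 (24 = 2*2*2*3 is not square-free),
# mobius(15) == 1 (two prime factors), mobius(30) == -1 (three prime factors).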
| 706 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split('.')[0]
    key_list = key.split('.')
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f'{orig_block_num}.{layer_num}.{original_name}', f'block.{new_block_num}.{layer_num}.{new_name}')
    return key
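# Illustrative key mapping (hypothetical checkpoint key):
#
#   >>> replace_key_with_offset('poolformer.encoder.2.0.mlp.fc1.weight', 1, 'mlp.fc1', 'output.conv1')
#   'poolformer.encoder.block.1.0.output.conv1.weight'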
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('network'):
            key = key.replace('network', 'poolformer.encoder')
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('bias') and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('proj')]
            key = key.replace(to_replace, f'patch_embeddings.{total_embed_found}.')
            key = key.replace('proj', 'projection')
            if key.endswith('bias'):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = 'poolformer.encoder.' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc1', 'output.conv1')
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc2', 'output.conv2')
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm1', 'before_norm')
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm2', 'after_norm')
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_1', 'layer_scale_1')
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_2', 'layer_scale_2')
        if "head" in key:
            key = key.replace('head', 'classifier')
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """We will verify our results on an image of cute cats."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    size = model_name[-3:]
    config.num_labels = 10_00
    filename = 'imagenet-1k-id2label.json'
    expected_shape = (1, 10_00)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 1_28, 3_20, 5_12]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 1_28, 3_20, 5_12]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 1_28, 3_20, 5_12]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 1_92, 3_84, 7_68]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 1_92, 3_84, 7_68]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.95
    else:
        raise ValueError(f'Size {size} not supported')

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values

    logger.info(f'Converting model {model_name}...')

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors='pt').pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f'Size {size} not supported')

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1E-2)

    # finally, save model and image processor
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 682 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'


def get_user_input():
    compute_environment = _ask_options(
        'In which compute environment are you running?', ['This machine', 'AWS (Amazon SageMaker)'], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('config', description=description)
    else:
        parser = argparse.ArgumentParser('Accelerate config command', description=description)

    parser.add_argument(
        '--config_file', default=None, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith('.json'):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f'accelerate configuration saved at {config_file}')


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
| 707 |
from math import asin, atan, cos, radians, sin, sqrt, tan

# CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
RADIUS = 637_8137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in metres between two points, using reduced
    latitudes to correct for the Earth's flattening."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)

    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)

    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda

    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
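# Sanity check: the distance from a point to itself is zero, e.g.
# haversine_distance(37.774856, -122.424227, 37.774856, -122.424227) == 0.0;
# larger separations grow toward the ~20,000 km antipodal maximum.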
| 682 | 0 |
import numpy as np

from transformers import Pipeline


def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    """Custom pipeline that classifies a pair of texts with a sequence classification model."""

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs['second_text'] = kwargs['second_text']
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
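# Hedged usage sketch (any sequence classification checkpoint would do; the one
# below is the pair-classification example model from the HF custom-pipeline docs):
#
#   from transformers import AutoModelForSequenceClassification, AutoTokenizer
#   model = AutoModelForSequenceClassification.from_pretrained('sgugger/finetuned-bert-mrpc')
#   tokenizer = AutoTokenizer.from_pretrained('sgugger/finetuned-bert-mrpc')
#   pipe = PairClassificationPipeline(model=model, tokenizer=tokenizer)
#   print(pipe('I like you', second_text='I love you'))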
| 708 |
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR outputs 1 only when both inputs are equal."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
| 682 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 709 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 ):
__UpperCamelCase =right or len(SCREAMING_SNAKE_CASE__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
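# Worked example for the two-ended recursive search above, using the
# pre-renaming name `search` that the recursive call still references: both
# ends are checked on every call, so the window narrows from both sides and at
# most ~len(list_data) / 2 calls are made.
#
#     search([1, 3, 5, 7, 9], 7)  # -> 3  (found after one narrowing step)
#     search([1, 3, 5, 7, 9], 9)  # -> 4  (matched at the right end)
#     search([1, 3, 5, 7, 9], 4)  # -> -1 (absent)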
| 682 | 0 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : list ):
_validate_point(SCREAMING_SNAKE_CASE__ )
_validate_point(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError('Both points must be in the same n-dimensional space' )
return float(sum(abs(a - b ) for a, b in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list[float] ):
if point:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
for item in point:
if not isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ):
__UpperCamelCase =(
'Expected a list of numbers as input, found '
F'{type(SCREAMING_SNAKE_CASE__ ).__name__}'
)
raise TypeError(SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =F'Expected a list of numbers as input, found {type(SCREAMING_SNAKE_CASE__ ).__name__}'
raise TypeError(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError('Missing an input' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : list ):
_validate_point(SCREAMING_SNAKE_CASE__ )
_validate_point(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError('Both points must be in the same n-dimensional space' )
return float(sum(abs(x - y ) for x, y in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
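# Worked example for the two Manhattan-distance variants above (both were
# renamed to _UpperCAmelCase; `dist` below stands for either one): the
# distance is the sum of absolute per-coordinate differences.
#
#     dist([1, 1], [3, 3])      # |1 - 3| + |1 - 3| = 4.0
#     dist([1, 1], [1, 1, 1])   # ValueError: points are in different dimensions
#     dist([1, "two"], [3, 3])  # TypeError: non-numeric coordinate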
| 710 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , ) -> List[Any]:
__UpperCamelCase =size if size is not None else {'height': 18, 'width': 18}
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =num_channels
__UpperCamelCase =image_size
__UpperCamelCase =min_resolution
__UpperCamelCase =max_resolution
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =apply_ocr
def _a ( self ) -> Tuple:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =LayoutLMvaImageProcessingTester(self )
@property
def _a ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'apply_ocr' ) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def _a ( self ) -> Dict:
pass
def _a ( self ) -> Optional[Any]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , A_ )
self.assertIsInstance(encoding.boxes , A_ )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> int:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> List[str]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> Any:
# with apply_OCR = True
__UpperCamelCase =LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__UpperCamelCase =Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A_ )
self.assertListEqual(encoding.boxes , A_ )
# with apply_OCR = False
__UpperCamelCase =LayoutLMvaImageProcessor(apply_ocr=A_ )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
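# Usage sketch distilled from the tests above (LayoutLMvaImageProcessor is the
# digit-stripped renaming of LayoutLMv3ImageProcessor; apply_ocr needs
# pytesseract installed):
#
#     processor = LayoutLMvaImageProcessor()             # apply_ocr defaults to True
#     encoding = processor(image, return_tensors="pt")
#     encoding.pixel_values.shape                        # (1, 3, 224, 224)
#     encoding.words, encoding.boxes                     # OCR words + bounding boxes
#
#     processor = LayoutLMvaImageProcessor(apply_ocr=False)  # pixel values only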
| 682 | 0 |
from __future__ import annotations
import math
from collections.abc import Callable
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Callable[[int | float], int | float] , SCREAMING_SNAKE_CASE__ : int | float , SCREAMING_SNAKE_CASE__ : int | float , SCREAMING_SNAKE_CASE__ : int = 1_00 , ):
__UpperCamelCase =x_start
__UpperCamelCase =fnc(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =0.0
for _ in range(SCREAMING_SNAKE_CASE__ ):
# Approximates curve as a sequence of linear lines and sums their length
__UpperCamelCase =(x_end - x_start) / steps + xa
__UpperCamelCase =fnc(SCREAMING_SNAKE_CASE__ )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
__UpperCamelCase =xa
__UpperCamelCase =fxa
return length
if __name__ == "__main__":
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict ):
return math.sin(10 * x )
print('f(x) = sin(10 * x)')
print('The length of the curve from x = -10 to x = 10 is:')
_A = 10
while i <= 10_0000:
print(f"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10
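# Sanity checks for the polyline arc-length approximation above: each step
# adds math.hypot(dx, dy) for one chord, so a straight line is measured
# exactly for any step count, while curved functions converge as `steps`
# grows.
#
#     line_length(lambda x: x, 0, 1, 10)       # sqrt(2) ~= 1.41421
#     line_length(math.sin, 0, math.pi, 1000)  # ~= 3.8202, the true arc length of sin on [0, pi]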
| 711 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_A = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : bool = field(default=A_ , metadata={"help": "Whether to use SortishSampler or not."} )
UpperCAmelCase__ : bool = field(
default=A_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=A_ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def _a ( self ) -> Dict:
__UpperCamelCase =super().to_dict()
for k, v in d.items():
if isinstance(A_ , A_ ):
__UpperCamelCase =v.to_dict()
return d
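# Usage sketch. The field names below are recovered from the help strings
# above (the dataclass attributes themselves were renamed) and match
# transformers.Seq2SeqTrainingArguments:
#
#     args = Seq2SeqTrainingArguments(
#         output_dir="out",
#         predict_with_generate=True,
#         generation_max_length=128,
#         generation_num_beams=4,
#     )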
| 682 | 0 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = FlaxAutoencoderKL
@property
def _a ( self ) -> int:
__UpperCamelCase =4
__UpperCamelCase =3
__UpperCamelCase =(32, 32)
__UpperCamelCase =jax.random.PRNGKey(0 )
__UpperCamelCase =jax.random.uniform(A_ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def _a ( self ) -> Optional[Any]:
__UpperCamelCase ={
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
__UpperCamelCase =self.dummy_input
return init_dict, inputs_dict
| 712 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Dict = "blip_text_model"
def __init__( self , A_=30524 , A_=768 , A_=768 , A_=3072 , A_=768 , A_=12 , A_=8 , A_=512 , A_="gelu" , A_=1E-12 , A_=0.0 , A_=0.0 , A_=0.02 , A_=30522 , A_=2 , A_=0 , A_=102 , A_=True , A_=True , **A_ , ) -> Optional[int]:
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , sep_token_id=A_ , **A_ , )
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =encoder_hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =is_decoder
__UpperCamelCase =use_cache
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "blip_vision_model"
def __init__( self , A_=768 , A_=3072 , A_=512 , A_=12 , A_=12 , A_=384 , A_=16 , A_="gelu" , A_=1E-5 , A_=0.0 , A_=1E-10 , **A_ , ) -> Optional[Any]:
super().__init__(**A_ )
__UpperCamelCase =hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =patch_size
__UpperCamelCase =image_size
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_dropout
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : int = "blip"
UpperCAmelCase__ : Optional[int] = True
def __init__( self , A_=None , A_=None , A_=512 , A_=2.6592 , A_=256 , **A_ , ) -> Union[str, Any]:
super().__init__(**A_ )
if text_config is None:
__UpperCamelCase ={}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
if vision_config is None:
__UpperCamelCase ={}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
__UpperCamelCase =BlipTextConfig(**A_ )
__UpperCamelCase =BlipVisionConfig(**A_ )
__UpperCamelCase =self.vision_config.hidden_size
__UpperCamelCase =projection_dim
__UpperCamelCase =logit_scale_init_value
__UpperCamelCase =1.0
__UpperCamelCase =0.02
__UpperCamelCase =image_text_hidden_size
@classmethod
def _a ( cls , A_ , A_ , **A_ ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =copy.deepcopy(self.__dict__ )
__UpperCamelCase =self.text_config.to_dict()
__UpperCamelCase =self.vision_config.to_dict()
__UpperCamelCase =self.__class__.model_type
return output
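# Composition sketch for the three configs above, using the original
# Transformers names (the final classmethod is BlipConfig.from_text_vision_configs
# there):
#
#     text_cfg = BlipTextConfig(vocab_size=30524)
#     vision_cfg = BlipVisionConfig(image_size=384)
#     cfg = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg)
#     cfg.to_dict()  # nests the text_config / vision_config dicts, as defined above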
| 682 | 0 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_A : Tuple = 16
_A : int = 32
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Accelerator , SCREAMING_SNAKE_CASE__ : int = 16 ):
__UpperCamelCase =AutoTokenizer.from_pretrained('bert-base-cased' )
__UpperCamelCase =load_dataset('glue' , 'mrpc' )
def tokenize_function(SCREAMING_SNAKE_CASE__ : Any ):
# max_length=None => use the model max length (it's actually the default)
__UpperCamelCase =tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__UpperCamelCase =datasets.map(
SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__UpperCamelCase =tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(SCREAMING_SNAKE_CASE__ : Optional[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__UpperCamelCase =1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__UpperCamelCase =16
elif accelerator.mixed_precision != "no":
__UpperCamelCase =8
else:
__UpperCamelCase =None
return tokenizer.pad(
SCREAMING_SNAKE_CASE__ , padding='longest' , max_length=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_tensors='pt' , )
# Instantiate dataloaders.
__UpperCamelCase =DataLoader(
tokenized_datasets['train'] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =DataLoader(
tokenized_datasets['validation'] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_A : List[Any] = mocked_dataloaders # noqa: F811
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , SCREAMING_SNAKE_CASE__ ) == "1":
__UpperCamelCase =2
# Initialize accelerator
__UpperCamelCase =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCamelCase =config['lr']
__UpperCamelCase =int(config['num_epochs'] )
__UpperCamelCase =int(config['seed'] )
__UpperCamelCase =int(config['batch_size'] )
__UpperCamelCase =evaluate.load('glue' , 'mrpc' )
# New Code #
# We can now define an inner training loop function. It should take the batch size as its only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=SCREAMING_SNAKE_CASE__ )
def inner_training_loop(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(SCREAMING_SNAKE_CASE__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCamelCase =AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=SCREAMING_SNAKE_CASE__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__UpperCamelCase =model.to(accelerator.device )
# Instantiate optimizer
__UpperCamelCase =AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase , __UpperCamelCase =get_dataloaders(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Instantiate scheduler
__UpperCamelCase =get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE__ , num_warmup_steps=1_00 , num_training_steps=(len(SCREAMING_SNAKE_CASE__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =accelerator.prepare(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE__ ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__UpperCamelCase =model(**SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =outputs.loss
accelerator.backward(SCREAMING_SNAKE_CASE__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__UpperCamelCase =model(**SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =outputs.logits.argmax(dim=-1 )
__UpperCamelCase , __UpperCamelCase =accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ , )
__UpperCamelCase =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , SCREAMING_SNAKE_CASE__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def _UpperCAmelCase ( ):
__UpperCamelCase =argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10. '
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
__UpperCamelCase =parser.parse_args()
__UpperCamelCase ={'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
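# The key mechanism above is the @find_executable_batch_size decorator from
# accelerate.utils: it invokes the wrapped loop and, when a CUDA out-of-memory
# error escapes, halves the batch size and retries, so inner_training_loop()
# eventually runs with the largest batch size that fits. The control flow it
# provides, in outline:
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def inner_training_loop(batch_size):
#         ...  # raises an OOM error when batch_size is too large
#
#     inner_training_loop()  # tries 128, then 64, 32, ... until one run completes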
| 713 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = RoCBertTokenizer
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : int = filter_non_english
def _a ( self ) -> Optional[Any]:
super().setUp()
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
__UpperCamelCase ={}
__UpperCamelCase ={}
for i, value in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =i
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(A_ , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Any:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__UpperCamelCase ={}
for i, token in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =RoCBertWordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _a ( self ) -> Dict:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _a ( self ) -> Tuple:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _a ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
__UpperCamelCase =self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _a ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
__UpperCamelCase =tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
__UpperCamelCase =tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False
__UpperCamelCase =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _a ( self ) -> List[str]:
__UpperCamelCase =['的', '人', '有']
__UpperCamelCase =''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =True
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =False
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCamelCase =[
f'##{token}' if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
@slow
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.encode('你好' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode('你是谁' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase ='你好,你是谁'
__UpperCamelCase =tokenizer.tokenize(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_shape_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_pronunciation_ids(A_ )
__UpperCamelCase =tokenizer.prepare_for_model(
A_ , A_ , A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode_plus(A_ , add_special_tokens=A_ )
self.assertEqual(A_ , A_ )
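# Usage sketch distilled from the tests above: RoCBertTokenizer is built from
# three vocab files and exposes three parallel id sequences per token (the
# file paths below are placeholders):
#
#     tok = RoCBertTokenizer(vocab_file, word_shape_file, word_pronunciation_file)
#     tokens = tok.tokenize("你好[SEP]你是谁")
#     tok.convert_tokens_to_ids(tokens)                # token ids
#     tok.convert_tokens_to_shape_ids(tokens)          # glyph-shape ids
#     tok.convert_tokens_to_pronunciation_ids(tokens)  # pronunciation ids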
| 682 | 0 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any ):
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def _UpperCAmelCase ( ):
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def _UpperCAmelCase ( ):
__UpperCamelCase ='mock-s3-bucket'
__UpperCamelCase =F's3://{mock_bucket}'
__UpperCamelCase =extract_path_from_uri(SCREAMING_SNAKE_CASE__ )
assert dataset_path.startswith('s3://' ) is False
__UpperCamelCase ='./local/path'
__UpperCamelCase =extract_path_from_uri(SCREAMING_SNAKE_CASE__ )
assert dataset_path == new_dataset_path
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase =is_remote_filesystem(SCREAMING_SNAKE_CASE__ )
assert is_remote is True
__UpperCamelCase =fsspec.filesystem('file' )
__UpperCamelCase =is_remote_filesystem(SCREAMING_SNAKE_CASE__ )
assert is_remote is False
@pytest.mark.parametrize('compression_fs_class' , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase ={'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_file, 'bz2': bza_file, 'lz4': lza_file}
__UpperCamelCase =input_paths[compression_fs_class.protocol]
if input_path is None:
__UpperCamelCase =F'for \'{compression_fs_class.protocol}\' compression protocol, '
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =fsspec.filesystem(compression_fs_class.protocol , fo=SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =os.path.basename(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =expected_filename[: expected_filename.rindex('.' )]
assert fs.glob('*' ) == [expected_filename]
with fs.open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as f, open(SCREAMING_SNAKE_CASE__ , encoding='utf-8' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('protocol' , ['zip', 'gzip'] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] ):
__UpperCamelCase ={'zip': zip_jsonl_path, 'gzip': jsonl_gz_path}
__UpperCamelCase =compressed_file_paths[protocol]
__UpperCamelCase ='dataset.jsonl'
__UpperCamelCase =F'{protocol}://{member_file_path}::{compressed_file_path}'
__UpperCamelCase , *__UpperCamelCase =fsspec.get_fs_token_paths(SCREAMING_SNAKE_CASE__ )
assert fs.isfile(SCREAMING_SNAKE_CASE__ )
assert not fs.isfile('non_existing_' + member_file_path )
@pytest.mark.integration
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
__UpperCamelCase =hf_api.dataset_info(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =HfFileSystem(repo_info=SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ )
assert sorted(hffs.glob('*' ) ) == [".gitattributes", "data"]
assert hffs.isdir('data' )
assert hffs.isfile('.gitattributes' ) and hffs.isfile('data/text_data.txt' )
with open(SCREAMING_SNAKE_CASE__ ) as f:
assert hffs.open('data/text_data.txt' , 'r' ).read() == f.read()
def _UpperCAmelCase ( ):
__UpperCamelCase ='bz2'
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , clobber=SCREAMING_SNAKE_CASE__ )
with pytest.warns(SCREAMING_SNAKE_CASE__ ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(SCREAMING_SNAKE_CASE__ ) == 1
assert (
str(warning_info[0].message )
== F'A filesystem protocol was already set for {protocol} and will be overwritten.'
)
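# The fsspec patterns the tests above rely on: a compression protocol wraps a
# single file (via `fo=...`) and exposes it as a one-file filesystem, and
# archive protocols can be chained with `::`:
#
#     fs = fsspec.filesystem("gzip", fo="dataset.jsonl.gz")
#     fs.glob("*")  # ["dataset.jsonl"]
#
#     fsspec.get_fs_token_paths("zip://dataset.jsonl::archive.zip")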
| 714 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_A = random.Random()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=1.0 , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ):
if rng is None:
__UpperCamelCase =global_rng
__UpperCamelCase =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=10 , A_=160 , A_=8 , A_=0.0 , A_=4000 , A_=False , A_=True , ) -> Optional[Any]:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =min_seq_length
__UpperCamelCase =max_seq_length
__UpperCamelCase =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCamelCase =padding_value
__UpperCamelCase =sampling_rate
__UpperCamelCase =return_attention_mask
__UpperCamelCase =do_normalize
__UpperCamelCase =feature_size
__UpperCamelCase =chunk_length
__UpperCamelCase =hop_length
def _a ( self ) -> int:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _a ( self , A_=False , A_=False ) -> Any:
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
__UpperCamelCase =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__UpperCamelCase =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__UpperCamelCase =[np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = WhisperFeatureExtractor if is_speech_available() else None
def _a ( self ) -> Optional[int]:
__UpperCamelCase =WhisperFeatureExtractionTester(self )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
__UpperCamelCase =self.feature_extraction_class.from_pretrained(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =os.path.join(A_ , 'feat_extract.json' )
feat_extract_first.to_json_file(A_ )
__UpperCamelCase =self.feature_extraction_class.from_json_file(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> Tuple:
# Tests that all calls wrap to encode_plus and batch_encode_plus
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
__UpperCamelCase =feature_extractor(A_ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__UpperCamelCase =feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test batched
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__UpperCamelCase =[floats_list((1, x) )[0] for x in (800, 800, 800)]
__UpperCamelCase =np.asarray(A_ )
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test truncation required
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
__UpperCamelCase =[x[: feature_extractor.n_samples] for x in speech_inputs]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs_truncated]
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
def _a ( self ) -> Dict:
import torch
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase =np.random.rand(100 , 32 ).astype(np.floataa )
__UpperCamelCase =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _a ( self , A_ ) -> Optional[int]:
__UpperCamelCase =load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
__UpperCamelCase =ds.sort('id' ).select(range(A_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def _a ( self ) -> Optional[int]:
# fmt: off
__UpperCamelCase =torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
__UpperCamelCase =self._load_datasamples(1 )
__UpperCamelCase =WhisperFeatureExtractor()
__UpperCamelCase =feature_extractor(A_ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1E-4 ) )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase =self._load_datasamples(1 )[0]
__UpperCamelCase =((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show the issue
__UpperCamelCase =feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0]
self.assertTrue(np.all(np.mean(A_ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1E-3 ) )
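# A standalone sketch of the normalization the last test above verifies (this
# mirrors the zero_mean_unit_var_norm helper in transformers feature
# extractors; eps guards against division by zero on near-silent audio):
import numpy as np


def zero_mean_unit_var(x: np.ndarray, eps: float = 1e-7) -> np.ndarray:
    # After this, x.mean() is ~0 and x.var() is ~1, undoing any linear rescaling.
    return (x - x.mean()) / np.sqrt(x.var() + eps)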
| 682 | 0 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : LevitConfig , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : bool = True ) -> List[str]:
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
__UpperCamelCase =timm.create_model('levit_128s' , pretrained=SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =timm.create_model('levit_128' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 1_92:
__UpperCamelCase =timm.create_model('levit_192' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 2_56:
__UpperCamelCase =timm.create_model('levit_256' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 3_84:
__UpperCamelCase =timm.create_model('levit_384' , pretrained=SCREAMING_SNAKE_CASE__ )
from_model.eval()
__UpperCamelCase =LevitForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE__ ).eval()
__UpperCamelCase =OrderedDict()
__UpperCamelCase =from_model.state_dict()
__UpperCamelCase =list(from_model.state_dict().keys() )
__UpperCamelCase =list(our_model.state_dict().keys() )
print(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__UpperCamelCase =weights[og_keys[i]]
our_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =torch.randn((2, 3, 2_24, 2_24) )
__UpperCamelCase =from_model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =our_model(SCREAMING_SNAKE_CASE__ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "The model logits don't match the original one."
__UpperCamelCase =name
print(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__UpperCamelCase =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = True ) -> Union[str, Any]:
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =10_00
__UpperCamelCase =(1, num_labels)
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =num_labels
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
__UpperCamelCase =partial(SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase ={
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
__UpperCamelCase ={
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , names_to_config[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
        help='The name of the model you wish to convert; it must be one of the supported LeViT* architectures.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 715 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , ) -> List[str]:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =2
__UpperCamelCase =99
__UpperCamelCase =0
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase ='last'
__UpperCamelCase =True
__UpperCamelCase =None
__UpperCamelCase =0
def _a ( self ) -> List[Any]:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase =None
if self.use_input_lengths:
__UpperCamelCase =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Any:
__UpperCamelCase =TFFlaubertModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertWithLMHeadModel(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertForQuestionAnsweringSimple(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =TFFlaubertForSequenceClassification(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFFlaubertForTokenClassification(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFFlaubertForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) =config_and_inputs
__UpperCamelCase ={
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCAmelCase__ : Any = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[int] = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _a ( self ) -> Dict:
__UpperCamelCase =TFFlaubertModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , emb_dim=37 )
def _a ( self ) -> Dict:
self.config_tester.run_common_tests()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A_ )
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A_ )
@slow
def _a ( self ) -> Optional[int]:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =TFFlaubertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> int:
__UpperCamelCase =TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase =tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__UpperCamelCase =model(A_ )[0]
__UpperCamelCase =tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice.
__UpperCamelCase =tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 682 | 0 |
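For reference, the slow integration test above corresponds to this minimal usage sketch; the checkpoint name, token ids, and expected output shape are taken directly from that test.

import tensorflow as tf
from transformers import TFFlaubertModel

model = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased')
input_ids = tf.convert_to_tensor([[0, 158, 735, 2592, 1424, 6727, 82, 1]], dtype=tf.int32)  # "J'aime flaubert !"
hidden_states = model(input_ids)[0]  # last hidden state, shape (1, 8, 512)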
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : UNetaDModel
UpperCAmelCase__ : ScoreSdeVeScheduler
def __init__( self , A_ , A_ ) -> Tuple:
super().__init__()
self.register_modules(unet=A_ , scheduler=A_ )
@torch.no_grad()
def __call__( self , A_ = 1 , A_ = 2000 , A_ = None , A_ = "pil" , A_ = True , **A_ , ) -> Union[ImagePipelineOutput, Tuple]:
__UpperCamelCase =self.unet.config.sample_size
__UpperCamelCase =(batch_size, 3, img_size, img_size)
__UpperCamelCase =self.unet
__UpperCamelCase =randn_tensor(A_ , generator=A_ ) * self.scheduler.init_noise_sigma
__UpperCamelCase =sample.to(self.device )
self.scheduler.set_timesteps(A_ )
self.scheduler.set_sigmas(A_ )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__UpperCamelCase =self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__UpperCamelCase =self.unet(A_ , A_ ).sample
__UpperCamelCase =self.scheduler.step_correct(A_ , A_ , generator=A_ ).prev_sample
# prediction step
__UpperCamelCase =model(A_ , A_ ).sample
__UpperCamelCase =self.scheduler.step_pred(A_ , A_ , A_ , generator=A_ )
__UpperCamelCase , __UpperCamelCase =output.prev_sample, output.prev_sample_mean
__UpperCamelCase =sample_mean.clamp(0 , 1 )
__UpperCamelCase =sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCamelCase =self.numpy_to_pil(A_ )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=A_ )
| 716 |
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch('socket.socket')
@patch('builtins.open')
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename='mytext.txt', testing=True)

    # ===== assertions =====
    # The server socket is created, bound, and put into listening state exactly once.
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    # The file is opened once and read until exhausted (the second read yields None).
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    # Exactly one chunk is sent before the connection and socket are torn down.
    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
| 682 | 0 |
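The file_transfer module itself isn't shown here; below is a hypothetical send_file implementation consistent with what the mocks in the test above assert (one accept, one recv, a single send, then shutdown and close). The port numbers and buffer size are illustrative, not taken from the real module.

import socket


def send_file(filename: str = 'mytext.txt', testing: bool = False) -> None:
    port = 12312 if testing else 8080  # illustrative ports
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('localhost', port))
    sock.listen(5)
    conn, _ = sock.accept()
    conn.recv(1024)  # wait for the client's request before streaming
    with open(filename, 'rb') as in_file:
        data = in_file.read(1024)
        while data:  # stream the file chunk by chunk until read() is falsy
            conn.send(data)
            data = in_file.read(1024)
    conn.close()
    sock.shutdown(1)
    sock.close()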
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Return the maximum sum of non-adjacent elements of ``nums``.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    >>> maximum_non_adjacent_sum([])
    0
    """
    if not nums:
        return 0
    # Running maxima: the best sum that includes / excludes the current element.
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 717 |
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of ``function`` via the secant method, seeded with x0 and x1."""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError('float division by zero, could not find root')
        # Secant update: follow the chord through (x_n, f(x_n)) and (x_n1, f(x_n1)).
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
| 682 | 0 |
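As a quick sanity check on the snippet above: the cubic x**3 - 2*x - 5 has a single real root near 2.0945515, and the secant iteration seeded with (3, 3.5) converges to it within the 1e-5 stopping tolerance.

root = intersection(f, 3, 3.5)
assert abs(root - 2.0945515) < 1e-4
assert abs(f(root)) < 1e-3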
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
_A = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCAmelCase__ ( datasets.BuilderConfig ):
"""simple docstring"""
UpperCAmelCase__ : int = 1_0_0_0_0
UpperCAmelCase__ : Optional[List[str]] = None
UpperCAmelCase__ : Optional[datasets.Features] = None
class UpperCAmelCase__ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = ParquetConfig
def _a ( self ) -> Optional[Any]:
return datasets.DatasetInfo(features=self.config.features )
def _a ( self , A_ ) -> Optional[Any]:
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
__UpperCamelCase =dl_manager.download_and_extract(self.config.data_files )
if isinstance(A_ , (str, list, tuple) ):
__UpperCamelCase =data_files
if isinstance(A_ , A_ ):
__UpperCamelCase =[files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__UpperCamelCase =[dl_manager.iter_files(A_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
__UpperCamelCase =[]
for split_name, files in data_files.items():
if isinstance(A_ , A_ ):
__UpperCamelCase =[files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__UpperCamelCase =[dl_manager.iter_files(A_ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(A_ ):
with open(A_ , 'rb' ) as f:
__UpperCamelCase =datasets.Features.from_arrow_schema(pq.read_schema(A_ ) )
break
splits.append(datasets.SplitGenerator(name=A_ , gen_kwargs={'files': files} ) )
return splits
def _a ( self , A_ ) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__UpperCamelCase =table_cast(A_ , self.info.features.arrow_schema )
return pa_table
def _a ( self , A_ ) -> Dict:
__UpperCamelCase =self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
for file_idx, file in enumerate(itertools.chain.from_iterable(A_ ) ):
with open(A_ , 'rb' ) as f:
__UpperCamelCase =pq.ParquetFile(A_ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__UpperCamelCase =pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f'{file_idx}_{batch_idx}', self._cast_table(A_ )
except ValueError as e:
logger.error(f'Failed to read file \'{file}\' with error {type(A_ )}: {e}' )
raise
| 718 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
_A = logging.getLogger(__name__)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self ) -> int:
__UpperCamelCase =False
def _a ( self , A_ , A_ , A_ , A_ ) -> List[Any]:
if not self.initialized:
__UpperCamelCase =RagRetriever(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =True
def _a ( self ) -> Optional[Any]:
self.retriever.index.init_index()
def _a ( self , A_ , A_ ) -> Dict:
__UpperCamelCase , __UpperCamelCase =self.retriever._main_retrieve(A_ , A_ )
return doc_ids, retrieved_doc_embeds
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ , A_=None ) -> Dict:
if index is not None and index.is_initialized() and len(A_ ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(A_ , A_ , A_ , A_ )
for worker in self.retrieval_workers
] )
def _a ( self ) -> Union[str, Any]:
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _a ( self , A_ , A_ ) -> Optional[int]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__UpperCamelCase =self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__UpperCamelCase , __UpperCamelCase =ray.get(random_worker.retrieve.remote(A_ , A_ ) )
else:
__UpperCamelCase , __UpperCamelCase =self._main_retrieve(A_ , A_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A_ )
@classmethod
def _a ( cls , A_ , A_=None , **A_ ) -> List[str]:
return super(A_ , cls ).get_tokenizers(A_ , A_ , **A_ )
@classmethod
def _a ( cls , A_ , A_ , A_=None , **A_ ) -> str:
__UpperCamelCase =kwargs.pop('config' , A_ ) or RagConfig.from_pretrained(A_ , **A_ )
__UpperCamelCase =RagTokenizer.from_pretrained(A_ , config=A_ )
__UpperCamelCase =rag_tokenizer.question_encoder
__UpperCamelCase =rag_tokenizer.generator
if indexed_dataset is not None:
__UpperCamelCase ='custom'
__UpperCamelCase =CustomHFIndex(config.retrieval_vector_size , A_ )
else:
__UpperCamelCase =cls._build_index(A_ )
return cls(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , retrieval_workers=A_ , index=A_ , )
| 682 | 0 |
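A hypothetical end-to-end sketch of the distributed retriever above. The class names RayRetriever and RagRayDistributedRetriever are assumptions (the definitions above appear under placeholder names, mirroring transformers' RAG research example), and the checkpoint and worker count are illustrative.

import ray

ray.init()
RemoteRetriever = ray.remote(RayRetriever)  # wrap the assumed worker class as a Ray actor
workers = [RemoteRetriever.remote() for _ in range(2)]  # one actor per retrieval worker
retriever = RagRayDistributedRetriever.from_pretrained('facebook/rag-token-nq', workers)
retriever.init_retrieval()  # each actor loads its own copy of the index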
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
_A = logging.getLogger(__name__)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self ) -> int:
__UpperCamelCase =False
def _a ( self , A_ , A_ , A_ , A_ ) -> List[Any]:
if not self.initialized:
__UpperCamelCase =RagRetriever(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =True
def _a ( self ) -> Optional[Any]:
self.retriever.index.init_index()
def _a ( self , A_ , A_ ) -> Dict:
__UpperCamelCase , __UpperCamelCase =self.retriever._main_retrieve(A_ , A_ )
return doc_ids, retrieved_doc_embeds
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ , A_=None ) -> Dict:
if index is not None and index.is_initialized() and len(A_ ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(A_ , A_ , A_ , A_ )
for worker in self.retrieval_workers
] )
def _a ( self ) -> Union[str, Any]:
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _a ( self , A_ , A_ ) -> Optional[int]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__UpperCamelCase =self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__UpperCamelCase , __UpperCamelCase =ray.get(random_worker.retrieve.remote(A_ , A_ ) )
else:
__UpperCamelCase , __UpperCamelCase =self._main_retrieve(A_ , A_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A_ )
@classmethod
def _a ( cls , A_ , A_=None , **A_ ) -> List[str]:
return super(A_ , cls ).get_tokenizers(A_ , A_ , **A_ )
@classmethod
def _a ( cls , A_ , A_ , A_=None , **A_ ) -> str:
__UpperCamelCase =kwargs.pop('config' , A_ ) or RagConfig.from_pretrained(A_ , **A_ )
__UpperCamelCase =RagTokenizer.from_pretrained(A_ , config=A_ )
__UpperCamelCase =rag_tokenizer.question_encoder
__UpperCamelCase =rag_tokenizer.generator
if indexed_dataset is not None:
__UpperCamelCase ='custom'
__UpperCamelCase =CustomHFIndex(config.retrieval_vector_size , A_ )
else:
__UpperCamelCase =cls._build_index(A_ )
return cls(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , retrieval_workers=A_ , index=A_ , )
| 719 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=64 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=[1, 16, 4, 4] , A_=None , ) -> Any:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =is_training
__UpperCamelCase =use_labels
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =scope
__UpperCamelCase =backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
__UpperCamelCase =(self.image_size // 32) ** 2
__UpperCamelCase =num_patches + 1
def _a ( self ) -> str:
__UpperCamelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =self.get_config()
return config, pixel_values, labels
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ={
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=A_ , )
def _a ( self , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModel(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =self.type_sequence_label_size
__UpperCamelCase =ViTHybridForImageClassification(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =config_and_inputs
__UpperCamelCase ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def _a ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self ) -> List[str]:
pass
def _a ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def _a ( self ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase =[*signature.parameters.keys()]
__UpperCamelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =_config_zero_init(A_ )
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(config=A_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
__UpperCamelCase =[f'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def _a ( self ) -> int:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =ViTHybridModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _UpperCAmelCase ( ):
__UpperCamelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ) -> Union[str, Any]:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self ) -> str:
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
A_ )
__UpperCamelCase =self.default_image_processor
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
__UpperCamelCase =model(**A_ )
# verify the logits
__UpperCamelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
__UpperCamelCase =torch.tensor([-1.9090, -0.4993, -0.2389] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self ) -> Optional[int]:
__UpperCamelCase =ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' )
__UpperCamelCase =model(**A_ )
__UpperCamelCase =outputs.logits
# model predicts one of the 1000 ImageNet classes
__UpperCamelCase =logits.argmax(-1 ).item()
        self.assertEqual(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
| 682 | 0 |
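A minimal inference sketch for the hybrid ViT checkpoint exercised above; every call mirrors the slow accelerate test (the test's idalabel is the id2label attribute under the placeholder naming).

import torch
from PIL import Image
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor

processor = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384')
model = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384')
image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
inputs = processor(images=image, return_tensors='pt')
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # expected: "tabby, tabby cat"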
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
_A = logging.get_logger(__name__)
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = ["pixel_values"]
def __init__( self , A_ = True , A_ = 1 / 255 , A_ = True , A_ = 8 , **A_ , ) -> None:
super().__init__(**A_ )
__UpperCamelCase =do_rescale
__UpperCamelCase =rescale_factor
__UpperCamelCase =do_pad
__UpperCamelCase =pad_size
def _a ( self , A_ , A_ , A_ = None , **A_ ) -> np.ndarray:
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def _a ( self , A_ , A_ , A_ = None ) -> str:
__UpperCamelCase , __UpperCamelCase =get_image_size(A_ )
__UpperCamelCase =(old_height // size + 1) * size - old_height
__UpperCamelCase =(old_width // size + 1) * size - old_width
return pad(A_ , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=A_ )
def _a ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) -> Tuple:
__UpperCamelCase =do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase =rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase =do_pad if do_pad is not None else self.do_pad
__UpperCamelCase =pad_size if pad_size is not None else self.pad_size
__UpperCamelCase =make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
__UpperCamelCase =[to_numpy_array(A_ ) for image in images]
if do_rescale:
__UpperCamelCase =[self.rescale(image=A_ , scale=A_ ) for image in images]
if do_pad:
__UpperCamelCase =[self.pad(A_ , size=A_ ) for image in images]
__UpperCamelCase =[to_channel_dimension_format(A_ , A_ ) for image in images]
__UpperCamelCase ={'pixel_values': images}
        return BatchFeature(data=A_ , tensor_type=A_ )
| 720 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : LevitConfig , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : bool = True ):
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
__UpperCamelCase =timm.create_model('levit_128s' , pretrained=SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =timm.create_model('levit_128' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 1_92:
__UpperCamelCase =timm.create_model('levit_192' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 2_56:
__UpperCamelCase =timm.create_model('levit_256' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 3_84:
__UpperCamelCase =timm.create_model('levit_384' , pretrained=SCREAMING_SNAKE_CASE__ )
from_model.eval()
__UpperCamelCase =LevitForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE__ ).eval()
__UpperCamelCase =OrderedDict()
__UpperCamelCase =from_model.state_dict()
__UpperCamelCase =list(from_model.state_dict().keys() )
__UpperCamelCase =list(our_model.state_dict().keys() )
print(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__UpperCamelCase =weights[og_keys[i]]
our_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =torch.randn((2, 3, 2_24, 2_24) )
__UpperCamelCase =from_model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =our_model(SCREAMING_SNAKE_CASE__ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "The model logits don't match the original one."
__UpperCamelCase =name
print(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__UpperCamelCase =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = True ):
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =10_00
__UpperCamelCase =(1, num_labels)
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =num_labels
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
__UpperCamelCase =partial(SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase ={
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
__UpperCamelCase ={
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , names_to_config[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
        help='The name of the model you wish to convert; it must be one of the supported LeViT* architectures.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 682 | 0 |
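A sketch mirroring the __main__ block above, driving the conversion programmatically. The entry point is defined above under a placeholder name but invoked as convert_weights_and_push, and the output folder is illustrative.

from pathlib import Path

dump_dir = Path('levit-dump-folder/')
dump_dir.mkdir(exist_ok=True, parents=True)
# Convert a single variant; passing None as the model name converts all five LeViT sizes.
convert_weights_and_push(dump_dir, 'levit-128S', False)  # final positional flag: push_to_hub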
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class UpperCAmelCase__ ( A_ ):
UpperCAmelCase__ : Union[str, Any] = ""
UpperCAmelCase__ : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
UpperCAmelCase__ : str = None # compression type in fsspec. ex: "gzip"
UpperCAmelCase__ : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self , A_ = "" , A_ = None , A_ = None , **A_ ) -> Optional[Any]:
super().__init__(self , **A_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
__UpperCamelCase =fsspec.open(
A_ , mode='rb' , protocol=A_ , compression=self.compression , client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
__UpperCamelCase =os.path.basename(self.file.path.split('::' )[0] )
__UpperCamelCase =(
self.compressed_name[: self.compressed_name.rindex('.' )]
if '.' in self.compressed_name
else self.compressed_name
)
__UpperCamelCase =None
@classmethod
def _a ( cls , A_ ) -> List[Any]:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(A_ ).lstrip('/' )
def _a ( self ) -> List[str]:
if self.dir_cache is None:
__UpperCamelCase ={**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
__UpperCamelCase ={f['name']: f}
def _a ( self , A_ ) -> Dict:
return self.file.open().read()
def _a ( self , A_ , A_ = "rb" , A_=None , A_=True , A_=None , **A_ , ) -> Union[str, Any]:
__UpperCamelCase =self._strip_protocol(A_ )
if mode != "rb":
raise ValueError(f'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'' )
return self.file.open()
class UpperCAmelCase__ ( A_ ):
UpperCAmelCase__ : int = "bz2"
UpperCAmelCase__ : Union[str, Any] = "bz2"
UpperCAmelCase__ : Dict = ".bz2"
class UpperCAmelCase__ ( A_ ):
UpperCAmelCase__ : Any = "gzip"
UpperCAmelCase__ : Optional[int] = "gzip"
UpperCAmelCase__ : List[Any] = ".gz"
class UpperCAmelCase__ ( A_ ):
UpperCAmelCase__ : Union[str, Any] = "lz4"
UpperCAmelCase__ : List[Any] = "lz4"
UpperCAmelCase__ : Optional[int] = ".lz4"
class UpperCAmelCase__ ( A_ ):
UpperCAmelCase__ : Optional[int] = "xz"
UpperCAmelCase__ : Optional[Any] = "xz"
UpperCAmelCase__ : Dict = ".xz"
class UpperCAmelCase__ ( A_ ):
UpperCAmelCase__ : Optional[int] = "zstd"
UpperCAmelCase__ : Optional[int] = "zstd"
UpperCAmelCase__ : Union[str, Any] = ".zst"
def __init__( self , A_ , A_ = "rb" , A_ = None , A_ = None , A_ = DEFAULT_BLOCK_SIZE , **A_ , ) -> str:
super().__init__(
fo=A_ , mode=A_ , target_protocol=A_ , target_options=A_ , block_size=A_ , **A_ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
__UpperCamelCase =self.file.__enter__
class UpperCAmelCase__ :
def __init__( self , A_ ) -> Union[str, Any]:
__UpperCamelCase =file_
def __enter__( self ) -> Any:
self._file.__enter__()
return self
def __exit__( self , *A_ , **A_ ) -> List[str]:
self._file.__exit__(*A_ , **A_ )
def __iter__( self ) -> List[str]:
return iter(self._file )
def _a ( self ) -> List[str]:
return next(self._file )
def __getattr__( self , A_ ) -> List[Any]:
return getattr(self._file , A_ )
def fixed_enter(*A_ , **A_ ):
return WrappedFile(_enter(*A_ , **A_ ) )
__UpperCamelCase =fixed_enter
| 721 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Any:
__UpperCamelCase ='laion/clap-htsat-unfused'
__UpperCamelCase =tempfile.mkdtemp()
def _a ( self , **A_ ) -> List[Any]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **A_ )
def _a ( self , **A_ ) -> Dict:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **A_ )
def _a ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> str:
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase =ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
def _a ( self ) -> int:
__UpperCamelCase =ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__UpperCamelCase =self.get_feature_extractor(do_normalize=A_ , padding_value=1.0 )
__UpperCamelCase =ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase =floats_list((3, 1000) )
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' )
__UpperCamelCase =processor(audios=A_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ) -> int:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase ='This is a test string'
__UpperCamelCase =processor(text=A_ )
__UpperCamelCase =tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCamelCase =processor.batch_decode(A_ )
__UpperCamelCase =tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
| 682 | 0 |
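Beyond the round-trip checks above, typical usage of the processor pairs text with raw audio. The sketch below uses the same checkpoint as the tests, with synthetic audio standing in for a real clip; the sampling_rate kwarg is assumed to be forwarded to the feature extractor.

import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained('laion/clap-htsat-unfused')
audio = np.random.randn(48_000).astype(np.float32)  # ~1 second of placeholder audio at 48 kHz
inputs = processor(text=['a dog barking'], audios=audio, sampling_rate=48_000, return_tensors='pt')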
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Dict:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =seq_length
__UpperCamelCase =is_training
__UpperCamelCase =use_input_mask
__UpperCamelCase =use_token_type_ids
__UpperCamelCase =use_labels
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =type_vocab_size
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =num_labels
__UpperCamelCase =num_choices
__UpperCamelCase =scope
def _a ( self ) -> Any:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self ) -> int:
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> List[Any]:
__UpperCamelCase =NystromformerModel(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , attention_mask=A_ , token_type_ids=A_ )
__UpperCamelCase =model(A_ , token_type_ids=A_ )
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
__UpperCamelCase =NystromformerForMaskedLM(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =NystromformerForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(
A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =NystromformerForSequenceClassification(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> int:
__UpperCamelCase =self.num_labels
__UpperCamelCase =NystromformerForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =self.num_choices
__UpperCamelCase =NystromformerForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase =model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) =config_and_inputs
__UpperCamelCase ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : str = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : List[str] = (
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : Tuple = False
def _a ( self ) -> str:
__UpperCamelCase =NystromformerModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , hidden_size=37 )
def _a ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCamelCase =type
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A_ )
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _a ( self ) -> List[Any]:
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =NystromformerModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> List[Any]:
__UpperCamelCase =NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
__UpperCamelCase =torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
__UpperCamelCase =model(A_ )[0]
__UpperCamelCase =torch.Size((1, 6, 768) )
self.assertEqual(output.shape , A_ )
__UpperCamelCase =torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , A_ , atol=1E-4 ) )
@slow
    def _a ( self ) -> Optional[int]:
        sentence = 'the [MASK] of Belgium is Brussels'
        tokenizer = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
        model = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
        encoding = tokenizer(sentence , return_tensors='pt' )
        with torch.no_grad():
            token_logits = model(encoding.input_ids ).logits
        prediction = token_logits[:, 2, :].argmax(-1 )[0]
        self.assertEqual(tokenizer.decode(prediction ) , 'capital' )
| 700 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
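# Implements the `accelerate test` subcommand: it builds an argument parser and then runs the
# bundled end-to-end test script through `accelerate-launch`.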
def test_command_parser( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('test' )
    else:
        parser = argparse.ArgumentParser('Accelerate test command' )
    parser.add_argument(
        '--config_file' , default=None , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser
def test_command( args ):
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f'--config_file={args.config_file} {script_name}'
    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print('Test is a success! You are ready for your distributed training!' )
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
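# Illustrative invocation (the config path is a placeholder):
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml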
| 682 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
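# Standard lazy-import scaffolding: submodules are only materialized on first attribute
# access, and the torch-only modeling entries are registered solely when torch is installed.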
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encodec'] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 701 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
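# End-to-end conversion: load a T5X/Flax Pix2Struct checkpoint, rename its parameters to the
# Hugging Face layout, then save the PyTorch model together with its processor.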
def get_flax_param( t5x_checkpoint_path ):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path )
    flax_params = flatten_dict(flax_params )
    return flax_params
def rename_and_convert_flax_params( flax_dict ):
    converted_dict = {}
    CONVERSION_MAPPING = {
        'token_embedder': 'embeddings',
        'encoder_norm': 'layernorm',
        'kernel': 'weight',
        '.out': '.output',
        'scale': 'weight',
        'embedders_0.pos_embedding': 'row_embedder.weight',
        'embedders_1.pos_embedding': 'column_embedder.weight',
    }
    DECODER_CONVERSION_MAPPING = {
        'query': 'attention.query',
        'key': 'attention.key',
        'value': 'attention.value',
        'output.dense': 'output',
        'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
        'pre_self_attention_layer_norm': 'self_attention.layer_norm',
        'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
        'mlp.': 'mlp.DenseReluDense.',
        'pre_mlp_layer_norm': 'mlp.layer_norm',
        'self_attention.o': 'self_attention.attention.o',
        'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
        'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
        'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.logits_dense.weight': 'decoder.lm_head.weight',
    }
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = '.'.join(key[1:] )
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old , new )
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old , new )
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)' , r'layer.\1' , new_key )
                new_key = new_key.replace('encoder' , 'encoder.encoder' )
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)' , r'layer.\1' , new_key )
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T )
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key] )
    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf( t5x_checkpoint_path , pytorch_dump_folder_path , use_large=False , is_vqa=False ):
    flax_params = get_flax_param(t5x_checkpoint_path )
    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
        decoder_config = Pix2StructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=is_vqa )
    model = Pix2StructForConditionalGeneration(config )
    torch_params = rename_and_convert_flax_params(flax_params )
    model.load_state_dict(torch_params )
    tokenizer = AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor , tokenizer=tokenizer )
    if use_large:
        # attribute names below are reconstructed from the upstream conversion script
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
    print('Model saved in {}'.format(pytorch_dump_folder_path ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--use_large', action='store_true', help='Use large model.')
    parser.add_argument('--is_vqa', action='store_true', help='Convert the VQA version of the model.')
    args = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
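# Illustrative command line (script filename and paths are placeholders):
#   python convert_pix2struct_checkpoint.py --t5x_checkpoint_path /tmp/pix2struct_ckpt \
#       --pytorch_dump_folder_path /tmp/pix2struct_hf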
| 682 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
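# Release-gating SageMaker test: fine-tunes roberta-large on GLUE with smdistributed model
# parallelism and asserts runtime/accuracy/loss against the thresholds declared below.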
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Optional[int]:
if self.framework == "pytorch":
subprocess.run(
                f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='utf-8' , check=True , )
assert hasattr(self , 'env' )
    def create_estimator( self , instance_count ) -> Dict:
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            'enabled': True,
            'processes_per_host': 8,
        }
        smp_options = {
            'enabled': True,
            'parameters': {
                'microbatches': 4,
                'placement_strategy': 'spread',
                'pipeline': 'interleaved',
                'optimize': 'speed',
                'partitions': 4,
                'ddp': True,
            },
        }
        distribution = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
        name_extension = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'{self.env.base_job_name}-{instance_count}-smp-{name_extension}' , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={
                **self.env.hyperparameters,
                'model_name_or_path': self.model_name_or_path,
                'max_steps': 500,
            } , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version='py36' , )
    def save_results_as_csv( self , job_name ) -> Optional[int]:
        TrainingJobAnalytics(job_name ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(1,)] )
    def _a ( self , instance_count ) -> Union[str, Any]:
        # create estimator
        estimator = self.create_estimator(instance_count )
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 999999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
        assert all(t <= self.results['eval_loss'] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f'{estimator.latest_training_job.name}.json' , 'w' ) as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , outfile )
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trocr'] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 682 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
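# Dataset wrapper used during distillation: it holds pre-tokenized sequences, splits the
# over-long ones, drops empty or unknown-heavy ones, and pads batches for the DataLoader.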
class UpperCAmelCase__ ( Dataset ):
"""simple docstring"""
    def __init__( self , params , data ) -> Optional[int]:
        self.params = params
        self.token_ids = np.array(data )
        self.lengths = np.array([len(t ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , A_ ) -> Any:
return (self.token_ids[index], self.lengths[index])
def __len__( self ) -> Dict:
return len(self.lengths )
def _a ( self ) -> str:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def _a ( self ) -> int:
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f'Splitting {sum(indices )} too long sequences.' )
        def divide_chunks(l , n ):
            return [l[i : i + n] for i in range(0 , len(l ) , n )]
        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id , sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            cls_id , sep_id = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
        for seq_, len_ in zip(self.token_ids , self.lengths ):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_ )
                new_lengths.append(len_ )
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_ , max_len - 2 ):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s , 0 , cls_id )
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s , len(sub_s ) , sep_id )
                    assert len(sub_s ) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s )
                new_tok_ids.extend(sub_seqs )
                new_lengths.extend([len(l ) for l in sub_seqs] )
        self.token_ids = np.array(new_tok_ids )
        self.lengths = np.array(new_lengths )
    def _a ( self ) -> Any:
        init_size = len(self )
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.' )
    def _a ( self ) -> int:
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids['unk_token']
            init_size = len(self )
            unk_occs = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
            indices = (unk_occs / self.lengths) < 0.5
            self.token_ids = self.token_ids[indices]
            self.lengths = self.lengths[indices]
            new_size = len(self )
            logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' )
def _a ( self ) -> Any:
if not self.params.is_master:
return
logger.info(f'{len(self )} sequences' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def _a ( self , batch ) -> str:
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids ) == len(lengths )
        # Max for paddings
        max_seq_len_ = max(lengths )
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['pad_token']
        else:
            pad_idx = self.params.special_tok_ids['unk_token']
        tk_ = [list(t.astype(int ) ) + [pad_idx] * (max_seq_len_ - len(t )) for t in token_ids]
        assert len(tk_ ) == len(token_ids )
        assert all(len(t ) == max_seq_len_ for t in tk_ )
        tk_t = torch.tensor(tk_ )  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths )  # (bs)
        return tk_t, lg_t
| 703 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Tuple:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =99
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =37
__UpperCamelCase ='gelu'
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase =None
def _a ( self ) -> Tuple:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
        model = TFRoFormerModel(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> int:
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        prediction_scores = model(inputs )['logits']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
        model = TFRoFormerForMaskedLM(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Dict:
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Union[str, Any]:
        model = TFRoFormerForQuestionAnswering(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Dict:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Tuple = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Tuple = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> Optional[Any]:
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False
    def _a ( self ) -> str:
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=37 )
def _a ( self ) -> Tuple:
self.config_tester.run_common_tests()
    def _a ( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def _a ( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def _a ( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs )
    def _a ( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def _a ( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def _a ( self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def _a ( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def _a ( self ) -> Union[str, Any]:
        model = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
        self.assertIsNotNone(model )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
    def _a ( self ) -> List[str]:
        model = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )
        print(output[:, :3, :3] )
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.1205_3341, -1.026_4901, 0.2922_1946],
                    [-1.513_3783, 0.19_7433, 0.1519_0607],
                    [-5.013_5403, -3.90_0256, -0.8403_8764],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    tolerance = 1e-4
    def _a ( self ) -> int:
        input_ids = tf.constant([[4, 10]] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emba_pos = emba(input_ids.shape )
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
        tf.debugging.assert_near(emba_pos , desired_weights , atol=self.tolerance )
    def _a ( self ) -> int:
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emba([2, 16, 512] )
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    tolerance = 1e-4
    def _a ( self ) -> List[Any]:
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        sinusoidal_pos = embed_positions([2, 16, 768] )[None, None, :, :]
        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ] )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , desired_query_layer , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , desired_key_layer , atol=self.tolerance )
| 682 | 0 |
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
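# The loader below mirrors the original HiFi-GAN checkpoint layout onto SpeechT5HifiGan,
# copying the weight-norm parametrization (weight_g/weight_v pairs) tensor by tensor.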
def load_weights( checkpoint , hf_model , config ):
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[f'upsamples.{i}.1.weight_g']
        hf_model.upsampler[i].weight_v.data = checkpoint[f'upsamples.{i}.1.weight_v']
        hf_model.upsampler[i].bias.data = checkpoint[f'upsamples.{i}.1.bias']
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f'blocks.{i}.convs1.{j}.1.bias']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f'blocks.{i}.convs2.{j}.1.bias']
    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechT5HifiGanConfig()
    model = SpeechT5HifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint['model']['generator'] , model , config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('Pushing to the hub...' )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 704 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
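# Educational linear-algebra module: a Vector and a Matrix class with the usual operator
# overloads, plus helpers to build zero, unit-basis and random vectors and matrices.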
class Vector:
    """simple docstring"""
    def __init__( self , components = None ) -> None:
        if components is None:
            components = []
        self.__components = list(components )
    def __len__( self ) -> int:
        return len(self.__components )
    def __str__( self ) -> str:
        return "(" + ",".join(map(str , self.__components ) ) + ")"
    def __add__( self , other ) -> Vector:
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] + other.component(i ) for i in range(size )]
            return Vector(result )
        else:
            raise Exception('must have the same size' )
    def __sub__( self , other ) -> Vector:
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] - other.component(i ) for i in range(size )]
            return Vector(result )
        else: # error case
            raise Exception('must have the same size' )
    @overload
    def __mul__( self , other ) -> Vector:
        ...
    @overload
    def __mul__( self , other ) -> float:
        ...
    def __mul__( self , other ) -> float | Vector:
        if isinstance(other , (float, int) ):
            ans = [c * other for c in self.__components]
            return Vector(ans )
        elif isinstance(other , Vector ) and len(self ) == len(other ):
            size = len(self )
            prods = [self.__components[i] * other.component(i ) for i in range(size )]
            return sum(prods )
        else: # error case
            raise Exception('invalid operand!' )
    def copy( self ) -> Vector:
        return Vector(self.__components )
    def component( self , i ) -> float:
        if isinstance(i , int ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
        else:
            raise Exception('index out of range' )
    def change_component( self , pos , value ) -> None:
        assert -len(self.__components ) <= pos < len(self.__components )
        self.__components[pos] = value
    def euclidean_length( self ) -> float:
        if len(self.__components ) == 0:
            raise Exception('Vector is empty' )
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares ) )
    def angle( self , other , deg = False ) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den ) )
        else:
            return math.acos(num / den )
def zero_vector( dimension ):
    assert isinstance(dimension , int )
    return Vector([0] * dimension )
def unit_basis_vector( dimension , pos ):
    assert isinstance(dimension , int ) and (isinstance(pos , int ))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )
def axpy( scalar , x , y ):
    assert (
        isinstance(x , Vector )
        and isinstance(y , Vector )
        and (isinstance(scalar , (int, float) ))
    )
    return x * scalar + y
def random_vector( n , a , b ):
    random.seed(None )
    ans = [random.randint(a , b ) for _ in range(n )]
    return Vector(ans )
class Matrix:
    """simple docstring"""
    def __init__( self , matrix , w , h ) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h
    def __str__( self ) -> str:
        ans = ''
        for i in range(self.__height ):
            ans += "|"
            for j in range(self.__width ):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j] ) + ","
                else:
                    ans += str(self.__matrix[i][j] ) + "|\n"
        return ans
    def __add__( self , other ) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] + other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception('matrix must have the same dimension!' )
    def __sub__( self , other ) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] - other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception('matrices must have the same dimension!' )
    @overload
    def __mul__( self , other ) -> Matrix:
        ...
    @overload
    def __mul__( self , other ) -> Vector:
        ...
    def __mul__( self , other ) -> Vector | Matrix:
        if isinstance(other , Vector ): # matrix-vector
            if len(other ) == self.__width:
                ans = zero_vector(self.__height )
                for i in range(self.__height ):
                    prods = [
                        self.__matrix[i][j] * other.component(j )
                        for j in range(self.__width )
                    ]
                    ans.change_component(i , sum(prods ) )
                return ans
            else:
                raise Exception(
                    'vector must have the same size as the '
                    'number of columns of the matrix!' )
        elif isinstance(other , (int, float) ): # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width )]
                for i in range(self.__height )
            ]
            return Matrix(matrix , self.__width , self.__height )
        return None
    def height( self ) -> int:
        return self.__height
    def width( self ) -> int:
        return self.__width
    def component( self , x , y ) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception('change_component: indices out of bounds' )
    def change_component( self , x , y , value ) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception('change_component: indices out of bounds' )
    def minor( self , x , y ) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor ) ):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor , self.__width - 1 , self.__height - 1 ).determinant()
    def cofactor( self , x , y ) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x , y )
        else:
            raise Exception('Indices out of bounds' )
    def determinant( self ) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        if self.__height < 1:
            raise Exception('Matrix has no element' )
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0 , y ) for y in range(self.__width )
            ]
            return sum(cofactor_prods )
def square_zero_matrix( n ):
    ans = [[0] * n for _ in range(n )]
    return Matrix(ans , n , n )
def random_matrix( width , height , a , b ):
    random.seed(None )
    matrix = [
        [random.randint(a , b ) for _ in range(width )] for _ in range(height )
    ]
    return Matrix(matrix , width , height )
| 682 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500000
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
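# Times `datasets.Dataset.map`/`.filter` over a synthetic 500k-example dataset in several
# output formats and writes the measured durations to a JSON results file.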
@get_duration
def map( dataset : datasets.Dataset , **kwargs ):
    _ = dataset.map(**kwargs )
@get_duration
def filter( dataset : datasets.Dataset , **kwargs ):
    _ = dataset.filter(**kwargs )
def benchmark_map_filter():
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'text': datasets.Value('string' ), 'numbers': datasets.Value('float32' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , 'dataset.arrow' ) , features , num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=True )
        def tokenize(examples ):
            return tokenizer(examples['text'] )
        times['map identity'] = map(dataset )
        times['map identity batched'] = map(dataset , batched=True )
        times['map no-op batched'] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='numpy' ):
            times['map no-op batched numpy'] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='pandas' ):
            times['map no-op batched pandas'] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='torch' , columns='numbers' ):
            times['map no-op batched pytorch'] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='tensorflow' , columns='numbers' ):
            times['map no-op batched tensorflow'] = map(dataset , function=lambda x : None , batched=True )
        times['map fast-tokenizer batched'] = map(dataset , function=tokenize , batched=True )
        times['filter'] = filter(dataset )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH , 'wb' ) as f:
        f.write(json.dumps(times ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 705 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
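# Language codes are exposed as `__{code}__` tokens appended after the regular vocabulary,
# so any id at or above `encoder_size` denotes a language rather than a subword.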
class UpperCAmelCase__ ( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file , spm_file , src_lang=None , tgt_lang=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , language_codes="m2m100" , sp_model_kwargs = None , num_madeup_words=8 , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
        kwargs["additional_special_tokens"] = kwargs.get('additional_special_tokens' , [] )
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code )
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code ) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang , tgt_lang=tgt_lang , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , unk_token=unk_token , pad_token=pad_token , language_codes=language_codes , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=num_madeup_words , **kwargs , )
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )
        self.encoder_size = len(self.encoder )
        self.lang_token_to_id = {
            self.get_lang_token(lang_code ): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else 'en'
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang )
        self.set_src_lang_special_tokens(self._src_lang )
        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size( self ) -> int:
        return len(self.encoder ) + len(self.lang_token_to_id )
    @property
    def src_lang( self ) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def _tokenize( self , text ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> Optional[Any]:
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token , self.encoder[self.unk_token] )
    def _convert_id_to_token( self , index ) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ) -> List[Any]:
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def get_vocab( self ) -> Dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        save_dir = Path(save_directory )
        if not save_dir.is_dir():
            raise OSError(f'{save_directory} should be a directory' )
        vocab_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
    def prepare_seq2seq_batch( self , src_texts , src_lang = "en" , tgt_texts = None , tgt_lang = "ro" , **kwargs , ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _build_translation_inputs( self , raw_inputs , src_lang , tgt_lang , **extra_kwargs ) -> List[str]:
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , **extra_kwargs )
        tgt_lang_id = self.get_lang_id(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
    def _switch_to_input_mode( self ) -> List[Any]:
        self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ) -> Dict:
        self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        lang_token = self.get_lang_token(src_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self , tgt_lang ) -> None:
        lang_token = self.get_lang_token(tgt_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def get_lang_token( self , lang ) -> str:
        return self.lang_code_to_token[lang]
    def get_lang_id( self , lang ) -> int:
        lang_token = self.get_lang_token(lang )
        return self.lang_token_to_id[lang_token]
def load_spm( path , sp_model_kwargs ):
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path ):
    with open(path , 'r' ) as f:
        return json.load(f )
def save_json( data , path ):
    with open(path , 'w' ) as f:
        json.dump(data , f , indent=2 )
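# Illustrative usage, assuming the tokenizer is registered under its upstream name:
#   tokenizer = M2M100Tokenizer.from_pretrained('facebook/m2m100_418M', src_lang='en', tgt_lang='fr')
#   model_inputs = tokenizer('Hello world', return_tensors='pt')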
| 682 | 0 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
END_COMMON = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
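# The pattern tables above translate TF variable names into Hugging Face parameter names;
# the biases listed in KEYS_TO_IGNORE exist only in the TF graph and are skipped outright.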
def rename_state_dict_key( k , patterns ):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name , hf_name )
    return k
def convert_bigbird_pegasus( tf_weights , config_update ):
    cfg = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(cfg )
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )}
    for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict:
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
    mapping['model.encoder.embed_positions.weight'] = mapping['model.embed_positions.weight']
    mapping['model.decoder.embed_positions.weight'] = mapping.pop('model.embed_positions.weight' )
    missing, extra = torch_model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            'final_logits_bias',
            'model.encoder.embed_tokens.weight',
            'model.decoder.embed_tokens.weight',
            'lm_head.weight',
        ]
    ]
    assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], f'no matches found for the following tf keys {extra}'
    return torch_model
def get_tf_weights_as_numpy( path ):
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['global_step']
    for name, shape in tqdm(init_vars , desc='converting tf checkpoint to dict' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch( ckpt_path , save_dir , config_update ):
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights , config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
    parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 706 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    # Replaces the key by subtracting the offset from the original block number
    to_find = original_name.split('.')[0]
    key_list = key.split('.')
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f'{orig_block_num}.{layer_num}.{original_name}', f'block.{new_block_num}.{layer_num}.{new_name}')
    return key
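# Illustrative call (made-up key; the offset accounts for patch-embedding layers
# counted earlier in the conversion):
#   replace_key_with_offset('2.3.mlp.fc1.weight', 1, 'mlp.fc1', 'output.conv1')
#   -> 'block.1.3.output.conv1.weight'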
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('network'):
            key = key.replace('network', 'poolformer.encoder')
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('bias') and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('proj')]
            key = key.replace(to_replace, f'patch_embeddings.{total_embed_found}.')
            key = key.replace('proj', 'projection')
            if key.endswith('bias'):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = 'poolformer.encoder.' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc1', 'output.conv1')
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc2', 'output.conv2')
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm1', 'before_norm')
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm2', 'after_norm')
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_1', 'layer_scale_1')
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_2', 'layer_scale_2')
        if "head" in key:
            key = key.replace('head', 'classifier')
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    size = model_name[-3:]
    config.num_labels = 1000
    filename = 'imagenet-1k-id2label.json'
    expected_shape = (1, 1000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f'Size {size} not supported')
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values
    logger.info(f'Converting model {model_name}...')
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    # rename keys
    state_dict = rename_keys(state_dict)
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors='pt').pixel_values
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f'Size {size} not supported')
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)
    # finally, save model and image processor
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
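# Example invocation (a sketch; script name and .pth path are illustrative):
#   python convert_poolformer_checkpoint.py --model_name poolformer_s12 \
#       --checkpoint_path poolformer_s12.pth --pytorch_dump_folder_path ./poolformer-s12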
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def analyze_directory(self, directory: Path, identifier: Union[str, None] = None, n_identifier: Union[str, List[str], None] = None, ignore_files: Union[List[str], None] = None, only_modules: bool = True) -> None:
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing', file)
            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f'{module_identifier} is not a module.')
            else:
                result = doctest.testfile(str('..' / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling_doctest(self):
        directory = Path('src/transformers')
        identifier = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)
    def test_tokenization_doctest(self):
        directory = Path('src/transformers')
        identifier = 'tokenization'
        self.analyze_directory(directory, identifier=identifier)
    def test_configuration_doctest(self):
        directory = Path('src/transformers')
        identifier = 'configuration'
        self.analyze_directory(directory, identifier=identifier)
    def test_remaining_doctest(self):
        directory = Path('src/transformers')
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(directory, n_identifier=n_identifiers)
    def test_doc_files_doctest(self):
        directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
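# A minimal sketch of the two doctest modes exercised by `analyze_directory`:
# `DocTestSuite` collects docstring examples from an imported module, while
# `doctest.testfile` runs examples found in a plain documentation file.
#   import doctest, unittest, transformers
#   suite = doctest.DocTestSuite(transformers)
#   unittest.TextTestRunner().run(suite)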
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137
def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # reduce latitudes onto the auxiliary sphere using the ellipsoid flattening
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
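# Usage sketch (San Francisco to Yosemite; the result is in metres and depends on
# the ellipsoid constants defined above):
#   haversine_distance(37.774856, -122.424227, 37.864742, -119.537521)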
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_mobilevit'] = ['MobileViTFeatureExtractor']
    _import_structure['image_processing_mobilevit'] = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilevit'] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mobilevit'] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
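# With the lazy module registered in sys.modules, an import such as
# `from transformers.models.mobilevit import MobileViTModel` defers loading the
# heavy torch/TF modeling code until the attribute is actually accessed.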
def xnor_gate(input_1: int, input_2: int) -> int:
    return 1 if input_1 == input_2 else 0
def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
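# XNOR is the complement of XOR, so for bits a, b: xnor_gate(a, b) == 1 - (a ^ b)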
# Imports
import numpy as np
class UpperCAmelCase__ :
"""simple docstring"""
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None) -> bool:
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
def _a ( self , A_="" , A_=None , A_=None , A_=None , A_=None , A_=None ) -> Optional[int]:
self.set_matricies(red=A_ , green=A_ , blue=A_ , red_edge=A_ , nir=A_ )
        funcs = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
    def arvaa(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )
    def cvi(self):
        return self.nir * (self.red / (self.green**2))
    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )
    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)
    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )
    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )
    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )
    def atsavi(self, a=0.08, b=1.22, x=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )
    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green(self):
        return (self.nir / self.green) - 1
    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1
    def ci(self):
        return (self.red - self.blue) / self.red
    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
    def gdvi(self):
        return self.nir - self.green
    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )
    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)
    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i(self):
        return (self.red + self.green + self.blue) / 30.5
    def rvi(self):
        return self.nir / self.red
    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2
    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)
    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)
    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)
    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)
    def ri(self):
        return (self.red - self.green) / (self.red + self.green)
    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value
    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi(self):
        return self.nir / self.red
    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
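# Usage sketch (single-pixel bands as numpy arrays; the class keeps the document's
# placeholder name here, upstream it is known as `IndexCalculation`):
#   calc = UpperCAmelCase__(red=np.array([50.0]), green=np.array([80.0]),
#                           blue=np.array([40.0]), red_edge=np.array([90.0]),
#                           nir=np.array([120.0]))
#   calc.calculation('NDVI')  # (nir - red) / (nir + red)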
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    right = right or len(list_data) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
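# This two-ended linear search compares the key against both ends and recurses
# inward, so it needs at most len(list_data) / 2 calls:
#   search([1, 2, 3, 4, 5], 4)  # -> 3
#   search([1, 2, 3, 4, 5], 0)  # -> -1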
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class UpperCAmelCase__ ( PretrainedConfig ):
    """simple docstring"""
    model_type = "mra"
    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1E-5, position_embedding_type='absolute', block_per_row=4, approx_mode='full', initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
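# Usage sketch (upstream this class is `MraConfig`; the placeholder name is kept):
#   config = UpperCAmelCase__(block_per_row=2, approx_mode='full')
#   assert config.model_type == 'mra'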
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """simple docstring"""
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'apply_ocr'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors='pt')
        self.assertEqual(
            encoding.pixel_values.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/fixtures_docvqa', split='test')
        image = Image.open(ds[0]['file']).convert('RGB')
        encoding = image_processing(image, return_tensors='pt')
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A_ )
self.assertListEqual(encoding.boxes , A_ )
# with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors='pt')
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
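# To run this module alone (a sketch; the exact test-file path in the repo may differ):
#   pytest tests/models/layoutlmv3/test_image_processing_layoutlmv3.py -q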
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f'config.{attribute}' in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        'bos_index',
        'eos_index',
        'pad_index',
        'unk_index',
        'mask_index',
        'image_size',
        'use_cache',
        'out_features',
        'out_indices',
    ]
    attributes_used_in_generation = ['encoder_no_repeat_ngram_size']
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith('_token_id'):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ['self', 'kwargs']]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith('modeling_')]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())
    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])
        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])
    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes) > 0:
        error = 'The following configuration classes contain unused attributes in the corresponding modeling files:\n'
        for name, attributes in configs_with_unused_attributes.items():
            error += f'{name}: {attributes}\n'
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
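# Run from the repository root:
#   python utils/check_config_attributes.py
# If any configuration class defines __init__ attributes never read by its modeling
# files, check_config_attributes() raises a ValueError listing them.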
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase__ ( TrainingArguments ):
"""simple docstring"""
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."})
    generation_max_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        }, )
    generation_num_beams: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        }, )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None, metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        }, )
    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
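# Usage sketch (upstream this class is `Seq2SeqTrainingArguments`; `output_dir` is
# required by the TrainingArguments base class):
#   args = UpperCAmelCase__(output_dir='out', predict_with_generate=True, generation_num_beams=4)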
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCAmelCase__ ( PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'}) - {'image', 'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'}) - {'image'}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    test_attention_slicing = False
    # No `output_type`.
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'return_dict',
            'callback',
            'callback_steps',
        ])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'), up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'video': video,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs['output_type'] = 'np'
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _a ( self ) -> Any:
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5E-3)
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def _a ( self ) -> int:
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def _a ( self ) -> Any:
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def _a ( self ) -> Any:
pass
    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL', torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device='cpu').manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to('cuda')
        prompt = 'Spiderman is surfing'
        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type='pt').frames
        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
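# Inference sketch mirroring the slow test above (same model id; `video` is a
# (batch, frames, channels, height, width) tensor and a CUDA GPU is assumed):
#   pipe = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL', torch_dtype=torch.float16)
#   frames = pipe('Spiderman is surfing', video=video, num_inference_steps=3, output_type='pt').frames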
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class BlipTextConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "blip_text_model"
    def __init__(self, vocab_size=30524, hidden_size=768, encoder_hidden_size=768, intermediate_size=3072, projection_dim=768, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=512, hidden_act='gelu', layer_norm_eps=1E-12, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, bos_token_id=30522, eos_token_id=2, pad_token_id=0, sep_token_id=102, is_decoder=True, use_cache=True, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from BlipConfig
        if config_dict.get('model_type') == "blip":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class BlipVisionConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "blip_vision_model"
    def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, image_size=384, patch_size=16, hidden_act='gelu', layer_norm_eps=1E-5, attention_dropout=0.0, initializer_range=1E-10, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get('model_type') == "blip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class BlipConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "blip"
    is_composition = True
    def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, image_text_hidden_size=256, **kwargs):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.')
        if vision_config is None:
            vision_config = {}
            logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.')
        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size
    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
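# Composition sketch: a BlipConfig can be assembled from its two sub-configs, which
# also ties text_config.encoder_hidden_size to the vision hidden size (see __init__):
#   config = BlipConfig.from_text_vision_configs(BlipTextConfig(), BlipVisionConfig())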
import os
import pytest
from attr import dataclass
os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'  # defaults region
@dataclass
class SageMakerTestEnvironment:
    """simple docstring"""
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
@property
    def metric_definitions(self):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name(self) -> str:
return f'{self.framework}-transfromers-test'
@property
    def test_path(self) -> str:
return f'./tests/sagemaker/scripts/{self.framework}'
@property
    def image_uri(self) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
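# Test classes are expected to set a class-level `framework` attribute and request
# this fixture (a sketch of the wiring, not verbatim from this file):
#   @pytest.mark.usefixtures('sm_env')
#   class ExampleTest: framework = 'pytorch'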
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['word_shape_file'])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['word_pronunciation_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.word_shape_file, 'w', encoding='utf-8') as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, 'w', encoding='utf-8') as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        tokens = tokenizer.tokenize('你好[SEP]你是谁')
        self.assertListEqual(tokens, ['你', '好', '[SEP]', '你', '是', '谁'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz'), ['ah', '\u535A', '\u63A8', 'zz'])
    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['hello', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hällo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['h\u00E9llo'])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=['[UNK]'])
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]'), ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__UpperCamelCase ={}
for i, token in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =RoCBertWordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _a ( self ) -> Dict:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _a ( self ) -> Tuple:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _a ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
__UpperCamelCase =self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _a ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
__UpperCamelCase =tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
__UpperCamelCase =tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False
__UpperCamelCase =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _a ( self ) -> List[str]:
__UpperCamelCase =['的', '人', '有']
__UpperCamelCase =''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =True
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =False
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCamelCase =[
f'##{token}' if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
@slow
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.encode('你好' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode('你是谁' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase ='你好,你是谁'
__UpperCamelCase =tokenizer.tokenize(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_shape_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_pronunciation_ids(A_ )
__UpperCamelCase =tokenizer.prepare_for_model(
A_ , A_ , A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode_plus(A_ , add_special_tokens=A_ )
self.assertEqual(A_ , A_ )
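

# Illustrative sketch (not from the original file): a minimal greedy
# longest-match WordPiece tokenizer reproducing the behaviour exercised above,
# e.g. 'unwanted running' -> ['un', '##want', '##ed', 'runn', '##ing'], with a
# word containing no matching sub-piece collapsing to the unknown token.
def greedy_wordpiece(text, vocab, unk_token='[UNK]'):
    output = []
    for word in text.split():
        start, pieces = 0, []
        while start < len(word):
            end = len(word)
            current = None
            while start < end:
                piece = word[start:end]
                if start > 0:
                    piece = '##' + piece  # continuation pieces carry the '##' prefix
                if piece in vocab:
                    current = piece
                    break
                end -= 1
            if current is None:  # no sub-piece matched: the whole word is unknown
                pieces = [unk_token]
                break
            pieces.append(current)
            start = end
        output.extend(pieces)
    return output


assert greedy_wordpiece('unwanted running', {'un', '##want', '##ed', 'runn', '##ing'}) == [
    'un', '##want', '##ed', 'runn', '##ing'
]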
| 682 | 0 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float ):
if edge <= 0 or not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
        raise ValueError('Length must be positive.' )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float ):
if edge <= 0 or not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
        raise ValueError('Length must be positive.' )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
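

# Reference note (hedged): these appear to be the closed-form surface-area and
# volume formulas for a regular dodecahedron,
#   A = 3 * sqrt(25 + 10 * sqrt(5)) * a**2  ~ 20.6457 * a**2
#   V = (15 + 7 * sqrt(5)) / 4 * a**3       ~ 7.6631 * a**3
# so for edge length 1 the two functions above return roughly 20.6457 and 7.6631.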
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_A = random.Random()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=1.0 , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ):
if rng is None:
__UpperCamelCase =global_rng
__UpperCamelCase =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=10 , A_=160 , A_=8 , A_=0.0 , A_=4000 , A_=False , A_=True , ) -> Optional[Any]:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =min_seq_length
__UpperCamelCase =max_seq_length
__UpperCamelCase =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCamelCase =padding_value
__UpperCamelCase =sampling_rate
__UpperCamelCase =return_attention_mask
__UpperCamelCase =do_normalize
__UpperCamelCase =feature_size
__UpperCamelCase =chunk_length
__UpperCamelCase =hop_length
def _a ( self ) -> int:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _a ( self , A_=False , A_=False ) -> Any:
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
__UpperCamelCase =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__UpperCamelCase =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__UpperCamelCase =[np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = WhisperFeatureExtractor if is_speech_available() else None
def _a ( self ) -> Optional[int]:
__UpperCamelCase =WhisperFeatureExtractionTester(self )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
__UpperCamelCase =self.feature_extraction_class.from_pretrained(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =os.path.join(A_ , 'feat_extract.json' )
feat_extract_first.to_json_file(A_ )
__UpperCamelCase =self.feature_extraction_class.from_json_file(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> Tuple:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
__UpperCamelCase =feature_extractor(A_ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__UpperCamelCase =feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test batched
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__UpperCamelCase =[floats_list((1, x) )[0] for x in (800, 800, 800)]
__UpperCamelCase =np.asarray(A_ )
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test truncation required
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
__UpperCamelCase =[x[: feature_extractor.n_samples] for x in speech_inputs]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs_truncated]
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
def _a ( self ) -> Dict:
import torch
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase =np.random.rand(100 , 32 ).astype(np.floataa )
__UpperCamelCase =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _a ( self , A_ ) -> Optional[int]:
__UpperCamelCase =load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
__UpperCamelCase =ds.sort('id' ).select(range(A_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def _a ( self ) -> Optional[int]:
# fmt: off
__UpperCamelCase =torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
__UpperCamelCase =self._load_datasamples(1 )
__UpperCamelCase =WhisperFeatureExtractor()
__UpperCamelCase =feature_extractor(A_ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1E-4 ) )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase =self._load_datasamples(1 )[0]
__UpperCamelCase =((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
__UpperCamelCase =feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0]
self.assertTrue(np.all(np.mean(A_ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1E-3 ) )
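

# Illustrative sketch (not from the original test file): the zero-mean,
# unit-variance normalisation checked above can be reproduced with plain numpy.
import numpy as np


def zero_mean_unit_var(x: np.ndarray, eps: float = 1e-7) -> np.ndarray:
    # subtract the mean and divide by the standard deviation (eps avoids /0)
    return (x - x.mean()) / np.sqrt(x.var() + eps)


audio = np.random.rand(16000).astype(np.float32) * 65535.0
normed = zero_mean_unit_var(audio)
assert abs(normed.mean()) < 1e-3 and abs(normed.var() - 1) < 1e-3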
| 682 | 0 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int=None ) -> Optional[Any]:
if subparsers is not None:
__UpperCamelCase =subparsers.add_parser('env' )
else:
__UpperCamelCase =argparse.ArgumentParser('Accelerate env command' )
parser.add_argument(
'--config_file' , default=SCREAMING_SNAKE_CASE__ , help='The config file to use for the default values in the launching script.' )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
return parser
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]:
__UpperCamelCase =torch.__version__
__UpperCamelCase =torch.cuda.is_available()
__UpperCamelCase =is_xpu_available()
__UpperCamelCase =is_npu_available()
__UpperCamelCase ='Not found'
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =load_config_from_file(args.config_file ).to_dict()
__UpperCamelCase ={
'`Accelerate` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'Numpy version': np.__version__,
'PyTorch version (GPU?)': F'{pt_version} ({pt_cuda_available})',
'PyTorch XPU available': str(SCREAMING_SNAKE_CASE__ ),
'PyTorch NPU available': str(SCREAMING_SNAKE_CASE__ ),
        'System RAM': F'{psutil.virtual_memory().total / 1024 ** 3:.2f} GB',
}
if pt_cuda_available:
__UpperCamelCase =torch.cuda.get_device_name()
print('\nCopy-and-paste the text below in your GitHub issue\n' )
print('\n'.join([F'- {prop}: {val}' for prop, val in info.items()] ) )
print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:' )
__UpperCamelCase =(
'\n'.join([F'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else F'\t{accelerate_config}'
)
print(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =accelerate_config
return info
def _UpperCAmelCase ( ) -> Tuple:
__UpperCamelCase =env_command_parser()
__UpperCamelCase =parser.parse_args()
env_command(SCREAMING_SNAKE_CASE__ )
return 0
if __name__ == "__main__":
raise SystemExit(main())
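

# Illustrative sketch (not from the original file): how a parser like the one
# above is typically registered as a sub-command of a larger CLI. The names
# below are placeholders, not the real `accelerate` entry point.
import argparse

root = argparse.ArgumentParser('accelerate')
subcommands = root.add_subparsers()
env_parser = subcommands.add_parser('env')
env_parser.add_argument('--config_file', default=None)
args = root.parse_args(['env', '--config_file', 'cfg.yaml'])
print(args.config_file)  # -> cfg.yaml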
| 715 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , ) -> List[str]:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =2
__UpperCamelCase =99
__UpperCamelCase =0
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase ='last'
__UpperCamelCase =True
__UpperCamelCase =None
__UpperCamelCase =0
def _a ( self ) -> List[Any]:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase =None
if self.use_input_lengths:
__UpperCamelCase =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Any:
__UpperCamelCase =TFFlaubertModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertWithLMHeadModel(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertForQuestionAnsweringSimple(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =TFFlaubertForSequenceClassification(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFFlaubertForTokenClassification(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFFlaubertForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) =config_and_inputs
__UpperCamelCase ={
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCAmelCase__ : Any = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[int] = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _a ( self ) -> Dict:
__UpperCamelCase =TFFlaubertModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , emb_dim=37 )
def _a ( self ) -> Dict:
self.config_tester.run_common_tests()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A_ )
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A_ )
@slow
def _a ( self ) -> Optional[int]:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =TFFlaubertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> int:
__UpperCamelCase =TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase =tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__UpperCamelCase =model(A_ )[0]
__UpperCamelCase =tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice.
__UpperCamelCase =tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
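

# Illustrative sketch (not from the original file): the `ids_tensor` helper
# used throughout these tests is essentially a random integer tensor; a
# minimal stand-in built directly on TensorFlow looks like this.
import tensorflow as tf


def random_ids(shape, vocab_size):
    return tf.random.uniform(shape, minval=0, maxval=vocab_size, dtype=tf.int32)


print(random_ids((13, 7), 99).shape)  # -> (13, 7)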
| 682 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> List[Any]:
__UpperCamelCase =AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=A_ ).to(A_ )
__UpperCamelCase =AutoTokenizer.from_pretrained('google/mt5-small' )
__UpperCamelCase =tokenizer('Hello there' , return_tensors='pt' ).input_ids
__UpperCamelCase =tokenizer('Hi I am' , return_tensors='pt' ).input_ids
__UpperCamelCase =model(input_ids.to(A_ ) , labels=labels.to(A_ ) ).loss
__UpperCamelCase =-(labels.shape[-1] * loss.item())
__UpperCamelCase =-84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
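

# Reference note (hedged, not from the source): the model returns the *mean*
# cross-entropy over label tokens, so the score above,
#   mtf_score = -(labels.shape[-1] * loss.item()),
# rescales it to a summed negative log-likelihood, which is the unit the
# original Mesh-TensorFlow reference value (-84.9127) is expressed in.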
| 716 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
# ===== initialization =====
__UpperCamelCase =Mock()
__UpperCamelCase =conn, Mock()
__UpperCamelCase =iter([1, None] )
__UpperCamelCase =lambda SCREAMING_SNAKE_CASE__ : next(SCREAMING_SNAKE_CASE__ )
# ===== invoke =====
send_file(filename='mytext.txt' , testing=SCREAMING_SNAKE_CASE__ )
    # ===== verification =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
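

# Illustrative sketch (not from the original file): stacked @patch decorators
# inject mocks bottom-up, so the innermost decorator supplies the *first*
# argument -- the same convention the test above relies on.
from unittest.mock import patch


@patch('os.getcwd')   # outermost decorator -> second argument
@patch('os.listdir')  # innermost decorator -> first argument
def demo(mock_listdir, mock_getcwd):
    import os

    mock_listdir.return_value = ['a.txt']
    mock_getcwd.return_value = '/tmp'
    return os.listdir(), os.getcwd()


print(demo())  # -> (['a.txt'], '/tmp')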
| 682 | 0 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_A = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = PegasusTokenizer
UpperCAmelCase__ : Union[str, Any] = PegasusTokenizerFast
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : List[str] = True
def _a ( self ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCamelCase =PegasusTokenizer(A_ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _a ( self ) -> int:
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def _a ( self , **A_ ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A_ )
def _a ( self , A_ ) -> Optional[Any]:
return ("This is a test", "This is a test")
def _a ( self ) -> List[Any]:
__UpperCamelCase ='</s>'
__UpperCamelCase =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(A_ ) , 1103 )
def _a ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _a ( self ) -> Dict:
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCamelCase =self.tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCamelCase =(
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
__UpperCamelCase =rust_tokenizer([raw_input_str] , return_tensors=A_ , add_special_tokens=A_ ).input_ids[0]
__UpperCamelCase =py_tokenizer([raw_input_str] , return_tensors=A_ , add_special_tokens=A_ ).input_ids[0]
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Tuple:
__UpperCamelCase =self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
__UpperCamelCase ='<mask_1> To ensure a <mask_2> flow of bank resolutions.'
__UpperCamelCase =[2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
__UpperCamelCase =tokenizer([raw_input_str] , return_tensors=A_ ).input_ids[0]
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
__UpperCamelCase ='To ensure a smooth flow of bank resolutions.'
__UpperCamelCase =[413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
__UpperCamelCase =tokenizer([raw_input_str] , return_tensors=A_ ).input_ids[0]
self.assertListEqual(A_ , A_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _a ( self ) -> List[Any]:
__UpperCamelCase =['This is going to be way too long.' * 150, 'short example']
__UpperCamelCase =['not super long but more than 5 tokens', 'tiny']
__UpperCamelCase =self._large_tokenizer(A_ , padding=A_ , truncation=A_ , return_tensors='pt' )
__UpperCamelCase =self._large_tokenizer(
text_target=A_ , max_length=5 , padding=A_ , truncation=A_ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(A_ ) == 2 # input_ids, attention_mask.
@slow
def _a ( self ) -> Optional[int]:
# fmt: off
__UpperCamelCase ={'input_ids': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = PegasusTokenizer
UpperCAmelCase__ : Union[str, Any] = PegasusTokenizerFast
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : int = True
def _a ( self ) -> List[str]:
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCamelCase =PegasusTokenizer(A_ , offset=0 , mask_token_sent=A_ , mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _a ( self ) -> Union[str, Any]:
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def _a ( self , **A_ ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A_ )
def _a ( self , A_ ) -> Optional[int]:
return ("This is a test", "This is a test")
def _a ( self ) -> List[str]:
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCamelCase =self.tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCamelCase =(
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
__UpperCamelCase =rust_tokenizer([raw_input_str] , return_tensors=A_ , add_special_tokens=A_ ).input_ids[0]
__UpperCamelCase =py_tokenizer([raw_input_str] , return_tensors=A_ , add_special_tokens=A_ ).input_ids[0]
self.assertListEqual(A_ , A_ )
@require_torch
def _a ( self ) -> Dict:
__UpperCamelCase =['This is going to be way too long.' * 1000, 'short example']
__UpperCamelCase =['not super long but more than 5 tokens', 'tiny']
__UpperCamelCase =self._large_tokenizer(A_ , padding=A_ , truncation=A_ , return_tensors='pt' )
__UpperCamelCase =self._large_tokenizer(
text_target=A_ , max_length=5 , padding=A_ , truncation=A_ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(A_ ) == 2 # input_ids, attention_mask.
def _a ( self ) -> Any:
__UpperCamelCase =(
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
__UpperCamelCase =self._large_tokenizer(A_ ).input_ids
self.assertListEqual(
A_ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
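

# Illustrative sketch (hedged, not from the source): Pegasus reserves the first
# `offset` ids for its own special tokens (pad=0, eos=1, the mask tokens, ...),
# so a raw SentencePiece piece id is shifted by `offset` -- consistent with the
# `unk_token_id == offset + 2 == 105` assertion above.
OFFSET = 103


def sp_to_pegasus_id(sp_id: int) -> int:
    return sp_id + OFFSET


assert sp_to_pegasus_id(2) == 105  # SentencePiece '<unk>' (id 2) -> Pegasus unk id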
| 717 |
import math
from collections.abc import Callable
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Callable[[float], float] , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ):
__UpperCamelCase =xa
__UpperCamelCase =xa
while True:
if x_n == x_na or function(SCREAMING_SNAKE_CASE__ ) == function(SCREAMING_SNAKE_CASE__ ):
raise ZeroDivisionError('float division by zero, could not find root' )
__UpperCamelCase =x_na - (
function(SCREAMING_SNAKE_CASE__ ) / ((function(SCREAMING_SNAKE_CASE__ ) - function(SCREAMING_SNAKE_CASE__ )) / (x_na - x_n))
)
if abs(x_na - x_na ) < 10**-5:
return x_na
__UpperCamelCase =x_na
__UpperCamelCase =x_na
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float ):
return math.pow(SCREAMING_SNAKE_CASE__ , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
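    # Reference note: for f(x) = x**3 - 2*x - 5 the real root is ~2.0945514815,
    # so the call above should print a value close to that (the loop stops once
    # successive iterates agree to within 1e-5).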
| 682 | 0 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_A = logging.get_logger(__name__)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
if not is_sharded:
__UpperCamelCase =os.path.abspath(SCREAMING_SNAKE_CASE__ )
logger.info(F'Loading PyTorch weights from {pt_path}' )
__UpperCamelCase =torch.load(SCREAMING_SNAKE_CASE__ , map_location='cpu' )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
__UpperCamelCase =convert_pytorch_state_dict_to_flax(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
__UpperCamelCase =convert_pytorch_sharded_state_dict_to_flax(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return flax_state_dict
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple[str] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, jnp.ndarray] , SCREAMING_SNAKE_CASE__ : str , ):
def is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ : Tuple[str] ) -> bool:
return len(set(SCREAMING_SNAKE_CASE__ ) & {key, (model_prefix,) + key} ) > 0
# layer norm
__UpperCamelCase =pt_tuple_key[:-1] + ('scale',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
__UpperCamelCase =pt_tuple_key[:-1] + ('mean',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
__UpperCamelCase =pt_tuple_key[:-1] + ('var',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
return renamed_pt_tuple_key, pt_tensor
# embedding
__UpperCamelCase =pt_tuple_key[:-1] + ('embedding',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
__UpperCamelCase =pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__UpperCamelCase =pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__UpperCamelCase =pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__UpperCamelCase =pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
__UpperCamelCase =None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
__UpperCamelCase =pt_tuple_key[-2] + '_g'
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
__UpperCamelCase =pt_tuple_key[-2] + '_v'
if name is not None:
__UpperCamelCase =pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
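

# Illustrative sketch (not from the original file) of the conv-kernel layout
# change handled above: PyTorch stores Conv2d weights as (out, in, kH, kW),
# while Flax expects (kH, kW, in, out), hence the transpose(2, 3, 1, 0).
import numpy as np

pt_kernel = np.zeros((8, 3, 5, 5))             # (out_ch, in_ch, kH, kW)
flax_kernel = pt_kernel.transpose(2, 3, 1, 0)  # (kH, kW, in_ch, out_ch)
print(flax_kernel.shape)  # -> (5, 5, 3, 8)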
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ):
# convert pytorch tensor to numpy
__UpperCamelCase ={k: v.numpy() for k, v in pt_state_dict.items()}
__UpperCamelCase =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
__UpperCamelCase =flax_model.params['params']
else:
__UpperCamelCase =flax_model.params
__UpperCamelCase =flatten_dict(SCREAMING_SNAKE_CASE__ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__UpperCamelCase =flatten_dict(flax_model.params['batch_stats'] )
random_flax_state_dict.update(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase ={}
__UpperCamelCase =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
__UpperCamelCase =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__UpperCamelCase =tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
__UpperCamelCase =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__UpperCamelCase =pt_tuple_key[1:]
# Correctly rename weight parameters
__UpperCamelCase , __UpperCamelCase =rename_key_and_reshape_tensor(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# add model prefix if necessary
__UpperCamelCase =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__UpperCamelCase =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
__UpperCamelCase =jnp.asarray(SCREAMING_SNAKE_CASE__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
# also add unexpected weight so that warning is thrown
__UpperCamelCase =jnp.asarray(SCREAMING_SNAKE_CASE__ )
else:
# also add unexpected weight so that warning is thrown
__UpperCamelCase =jnp.asarray(SCREAMING_SNAKE_CASE__ )
return unflatten_dict(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict ):
import torch
# Load the index
__UpperCamelCase ={}
for shard_file in shard_filenames:
# load using msgpack utils
__UpperCamelCase =torch.load(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase ={k: v.numpy() for k, v in pt_state_dict.items()}
__UpperCamelCase =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__UpperCamelCase =flax_model.params['params']
__UpperCamelCase =flatten_dict(SCREAMING_SNAKE_CASE__ )
random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
else:
__UpperCamelCase =flax_model.params
__UpperCamelCase =flatten_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
__UpperCamelCase =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__UpperCamelCase =tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
__UpperCamelCase =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__UpperCamelCase =pt_tuple_key[1:]
# Correctly rename weight parameters
__UpperCamelCase , __UpperCamelCase =rename_key_and_reshape_tensor(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# add model prefix if necessary
__UpperCamelCase =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__UpperCamelCase =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
__UpperCamelCase =jnp.asarray(SCREAMING_SNAKE_CASE__ )
continue
if "var" in flax_key[-1]:
__UpperCamelCase =jnp.asarray(SCREAMING_SNAKE_CASE__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
# also add unexpected weight so that warning is thrown
__UpperCamelCase =jnp.asarray(SCREAMING_SNAKE_CASE__ )
else:
# also add unexpected weight so that warning is thrown
__UpperCamelCase =jnp.asarray(SCREAMING_SNAKE_CASE__ )
return unflatten_dict(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
__UpperCamelCase =os.path.abspath(SCREAMING_SNAKE_CASE__ )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
__UpperCamelCase =getattr(SCREAMING_SNAKE_CASE__ , 'Flax' + model.__class__.__name__ )
# load flax weight dict
with open(SCREAMING_SNAKE_CASE__ , 'rb' ) as state_f:
try:
__UpperCamelCase =from_bytes(SCREAMING_SNAKE_CASE__ , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
__UpperCamelCase =flatten_dict(jax.tree_util.tree_map(lambda SCREAMING_SNAKE_CASE__ : x.dtype == jnp.bfloataa , SCREAMING_SNAKE_CASE__ ) ).values()
if any(SCREAMING_SNAKE_CASE__ ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
__UpperCamelCase =jax.tree_util.tree_map(
lambda SCREAMING_SNAKE_CASE__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flatten_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =pt_model.state_dict()
__UpperCamelCase =(pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
__UpperCamelCase =(pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
__UpperCamelCase =[]
__UpperCamelCase =set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__UpperCamelCase =flax_key_tuple[0] == pt_model.base_model_prefix
__UpperCamelCase ='.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
__UpperCamelCase =flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
__UpperCamelCase =(pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(SCREAMING_SNAKE_CASE__ ) not in pt_model_dict:
# conv layer
__UpperCamelCase =flax_key_tuple[:-1] + ('weight',)
__UpperCamelCase =jnp.transpose(SCREAMING_SNAKE_CASE__ , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(SCREAMING_SNAKE_CASE__ ) not in pt_model_dict:
# linear layer
__UpperCamelCase =flax_key_tuple[:-1] + ('weight',)
__UpperCamelCase =flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__UpperCamelCase =flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
__UpperCamelCase =flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
__UpperCamelCase =flax_key_tuple[:-1] + ('running_var',)
if "batch_stats" in flax_state:
__UpperCamelCase ='.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
__UpperCamelCase ='.'.join(SCREAMING_SNAKE_CASE__ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
__UpperCamelCase ={}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
__UpperCamelCase =key.split('.' )
__UpperCamelCase =None
if key_components[-3::2] == ["parametrizations", "original0"]:
__UpperCamelCase =key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
__UpperCamelCase =key_components[-2] + '_v'
if name is not None:
__UpperCamelCase =key_components[:-3] + [name]
__UpperCamelCase ='.'.join(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =key
if flax_key in special_pt_names:
__UpperCamelCase =special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
__UpperCamelCase =np.asarray(SCREAMING_SNAKE_CASE__ ) if not isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ) else flax_tensor
__UpperCamelCase =torch.from_numpy(SCREAMING_SNAKE_CASE__ )
# remove from missing keys
missing_keys.remove(SCREAMING_SNAKE_CASE__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(SCREAMING_SNAKE_CASE__ )
pt_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# re-transform missing_keys to list
__UpperCamelCase =list(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
' use it for predictions and inference.' )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
'If your task is similar to the task the model of the checkpoint was trained on, '
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
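

# Illustrative sketch (not from the original file) of the bf16 -> fp32 upcast
# performed above: torch.from_numpy cannot consume bfloat16 arrays, so any
# bfloat16 leaves of the Flax params are cast to float32 first.
import jax
import jax.numpy as jnp

params = {'w': jnp.ones((2, 2), dtype=jnp.bfloat16)}
params = jax.tree_util.tree_map(
    lambda p: p.astype(jnp.float32) if p.dtype == jnp.bfloat16 else p, params
)
print(params['w'].dtype)  # -> float32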
| 718 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
_A = logging.getLogger(__name__)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self ) -> int:
__UpperCamelCase =False
def _a ( self , A_ , A_ , A_ , A_ ) -> List[Any]:
if not self.initialized:
__UpperCamelCase =RagRetriever(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =True
def _a ( self ) -> Optional[Any]:
self.retriever.index.init_index()
def _a ( self , A_ , A_ ) -> Dict:
__UpperCamelCase , __UpperCamelCase =self.retriever._main_retrieve(A_ , A_ )
return doc_ids, retrieved_doc_embeds
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ , A_=None ) -> Dict:
if index is not None and index.is_initialized() and len(A_ ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(A_ , A_ , A_ , A_ )
for worker in self.retrieval_workers
] )
def _a ( self ) -> Union[str, Any]:
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _a ( self , A_ , A_ ) -> Optional[int]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__UpperCamelCase =self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__UpperCamelCase , __UpperCamelCase =ray.get(random_worker.retrieve.remote(A_ , A_ ) )
else:
__UpperCamelCase , __UpperCamelCase =self._main_retrieve(A_ , A_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A_ )
@classmethod
def _a ( cls , A_ , A_=None , **A_ ) -> List[str]:
return super(A_ , cls ).get_tokenizers(A_ , A_ , **A_ )
@classmethod
def _a ( cls , A_ , A_ , A_=None , **A_ ) -> str:
__UpperCamelCase =kwargs.pop('config' , A_ ) or RagConfig.from_pretrained(A_ , **A_ )
__UpperCamelCase =RagTokenizer.from_pretrained(A_ , config=A_ )
__UpperCamelCase =rag_tokenizer.question_encoder
__UpperCamelCase =rag_tokenizer.generator
if indexed_dataset is not None:
__UpperCamelCase ='custom'
__UpperCamelCase =CustomHFIndex(config.retrieval_vector_size , A_ )
else:
__UpperCamelCase =cls._build_index(A_ )
return cls(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , retrieval_workers=A_ , index=A_ , )
| 682 | 0 |
from collections import Counter
from timeit import timeit
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "" , ):
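# A string can be rearranged into a palindrome iff at most one character occurs an odd number of times.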
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "" ):
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return True
__UpperCamelCase =input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__UpperCamelCase ={}
for character in lower_case_input_str:
__UpperCamelCase =character_freq_dict.get(SCREAMING_SNAKE_CASE__ , 0 ) + 1
__UpperCamelCase =0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
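# Example: "taco cat" -> "tacocat" after lowering and stripping spaces; counts t:2, a:2, c:2, o:1 give a single odd count, so the answer is True.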
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str = "" ):
print('\nFor string = ' , SCREAMING_SNAKE_CASE__ , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(SCREAMING_SNAKE_CASE__ ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(SCREAMING_SNAKE_CASE__ ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
_A = input(
'Enter string to determine if it can be rearranged as a palindrome or not: '
).strip()
benchmark(check_str)
_A = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
| 719 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=64 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=[1, 16, 4, 4] , A_=None , ) -> Any:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =is_training
__UpperCamelCase =use_labels
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =scope
__UpperCamelCase =backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
__UpperCamelCase =(self.image_size // 32) ** 2
__UpperCamelCase =num_patches + 1
def _a ( self ) -> str:
__UpperCamelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =self.get_config()
return config, pixel_values, labels
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ={
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=A_ , )
def _a ( self , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModel(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =self.type_sequence_label_size
__UpperCamelCase =ViTHybridForImageClassification(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =config_and_inputs
__UpperCamelCase ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def _a ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self ) -> List[str]:
pass
def _a ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def _a ( self ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase =[*signature.parameters.keys()]
__UpperCamelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =_config_zero_init(A_ )
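# With the initializer range zeroed out, freshly initialized weights must land exactly on 0.0 or 1.0, which the assertion below verifies.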
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(config=A_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
__UpperCamelCase =[f'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def _a ( self ) -> int:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =ViTHybridModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _UpperCAmelCase ( ):
__UpperCamelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ) -> Union[str, Any]:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self ) -> str:
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
A_ )
__UpperCamelCase =self.default_image_processor
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
__UpperCamelCase =model(**A_ )
# verify the logits
__UpperCamelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
__UpperCamelCase =torch.tensor([-1.9090, -0.4993, -0.2389] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self ) -> Optional[int]:
__UpperCamelCase =ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' )
__UpperCamelCase =model(**A_ )
__UpperCamelCase =outputs.logits
# model predicts one of the 1000 ImageNet classes
__UpperCamelCase =logits.argmax(-1 ).item()
self.assertEqual(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
| 682 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =3_84
if "tiny" in model_name:
__UpperCamelCase =[3, 3, 9, 3]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
if "small" in model_name:
__UpperCamelCase =[3, 3, 27, 3]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
if "base" in model_name:
__UpperCamelCase =[3, 3, 27, 3]
__UpperCamelCase =[1_28, 2_56, 5_12, 10_24]
__UpperCamelCase =5_12
if "large" in model_name:
__UpperCamelCase =[3, 3, 27, 3]
__UpperCamelCase =[1_92, 3_84, 7_68, 15_36]
__UpperCamelCase =7_68
if "xlarge" in model_name:
__UpperCamelCase =[3, 3, 27, 3]
__UpperCamelCase =[2_56, 5_12, 10_24, 20_48]
__UpperCamelCase =10_24
# set label information
__UpperCamelCase =1_50
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase ='ade20k-id2label.json'
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
__UpperCamelCase ={v: k for k, v in idalabel.items()}
__UpperCamelCase =ConvNextConfig(
depths=SCREAMING_SNAKE_CASE__ , hidden_sizes=SCREAMING_SNAKE_CASE__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
__UpperCamelCase =UperNetConfig(
backbone_config=SCREAMING_SNAKE_CASE__ , auxiliary_in_channels=SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ , )
return config
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase =[]
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.stages.{i}.{j}.gamma', F'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.weight', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.bias', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.weight', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.bias', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((F'backbone.downsample_layers.{i}.0.weight', F'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.0.bias', F'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.weight', F'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.bias', F'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase =dct.pop(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =val
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
__UpperCamelCase ={
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
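# The checkpoints above are MMSegmentation releases; only the 'state_dict' entry of the downloaded file holds the weights.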
__UpperCamelCase =model_name_to_url[model_name]
__UpperCamelCase =torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='cpu' )['state_dict']
__UpperCamelCase =get_upernet_config(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =UperNetForSemanticSegmentation(SCREAMING_SNAKE_CASE__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__UpperCamelCase =state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "bn" in key:
__UpperCamelCase =key.replace('bn' , 'batch_norm' )
__UpperCamelCase =val
# rename keys
__UpperCamelCase =create_rename_keys(SCREAMING_SNAKE_CASE__ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# verify on image
__UpperCamelCase ='https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
__UpperCamelCase =Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ).convert('RGB' )
__UpperCamelCase =SegformerImageProcessor()
__UpperCamelCase =processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
with torch.no_grad():
__UpperCamelCase =model(SCREAMING_SNAKE_CASE__ )
if model_name == "upernet-convnext-tiny":
__UpperCamelCase =torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
__UpperCamelCase =torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
__UpperCamelCase =torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
__UpperCamelCase =torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
__UpperCamelCase =torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print(F'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(F'openmmlab/{model_name}' )
processor.push_to_hub(F'openmmlab/{model_name}' )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[f"""upernet-convnext-{size}""" for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_A = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 720 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : LevitConfig , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : bool = True ):
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
__UpperCamelCase =timm.create_model('levit_128s' , pretrained=SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =timm.create_model('levit_128' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 1_92:
__UpperCamelCase =timm.create_model('levit_192' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 2_56:
__UpperCamelCase =timm.create_model('levit_256' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 3_84:
__UpperCamelCase =timm.create_model('levit_384' , pretrained=SCREAMING_SNAKE_CASE__ )
from_model.eval()
__UpperCamelCase =LevitForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE__ ).eval()
__UpperCamelCase =OrderedDict()
__UpperCamelCase =from_model.state_dict()
__UpperCamelCase =list(from_model.state_dict().keys() )
__UpperCamelCase =list(our_model.state_dict().keys() )
print(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
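# The timm and HF state dicts are assumed to enumerate parameters in the same order, so weights are paired by position rather than by name.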
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__UpperCamelCase =weights[og_keys[i]]
our_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =torch.randn((2, 3, 2_24, 2_24) )
__UpperCamelCase =from_model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =our_model(SCREAMING_SNAKE_CASE__ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "The model logits don't match the original one."
__UpperCamelCase =name
print(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__UpperCamelCase =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = True ):
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =10_00
__UpperCamelCase =(1, num_labels)
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =num_labels
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
__UpperCamelCase =partial(SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase ={
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
__UpperCamelCase ={
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , names_to_config[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert; it must be one of the supported Levit* architectures.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 682 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_A = logging.get_logger(__name__)
class UpperCAmelCase__ ( A_ ):
UpperCAmelCase__ : int = ["pixel_values"]
def __init__( self , A_ = True , A_ = 32 , A_=PILImageResampling.BILINEAR , A_ = True , **A_ , ) -> None:
__UpperCamelCase =do_resize
__UpperCamelCase =do_rescale
__UpperCamelCase =size_divisor
__UpperCamelCase =resample
super().__init__(**A_ )
def _a ( self , A_ , A_ , A_ , A_ = None , **A_ ) -> np.ndarray:
__UpperCamelCase , __UpperCamelCase =get_image_size(A_ )
# Rounds the height and width down to the closest multiple of size_divisor
__UpperCamelCase =height // size_divisor * size_divisor
__UpperCamelCase =width // size_divisor * size_divisor
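# e.g. with size_divisor=32, a 513x769 input is resized to 512x768.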
__UpperCamelCase =resize(A_ , (new_h, new_w) , resample=A_ , data_format=A_ , **A_ )
return image
def _a ( self , A_ , A_ , A_ = None , **A_ ) -> np.ndarray:
return rescale(image=A_ , scale=A_ , data_format=A_ , **A_ )
def _a ( self , A_ , A_ = None , A_ = None , A_=None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) -> BatchFeature:
__UpperCamelCase =do_resize if do_resize is not None else self.do_resize
__UpperCamelCase =do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase =size_divisor if size_divisor is not None else self.size_divisor
__UpperCamelCase =resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
__UpperCamelCase =make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
__UpperCamelCase =[to_numpy_array(A_ ) for img in images]
if do_resize:
__UpperCamelCase =[self.resize(A_ , size_divisor=A_ , resample=A_ ) for image in images]
if do_rescale:
__UpperCamelCase =[self.rescale(A_ , scale=1 / 255 ) for image in images]
__UpperCamelCase =[to_channel_dimension_format(A_ , A_ ) for image in images]
__UpperCamelCase ={'pixel_values': images}
return BatchFeature(data=A_ , tensor_type=A_ )
| 721 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Any:
__UpperCamelCase ='laion/clap-htsat-unfused'
__UpperCamelCase =tempfile.mkdtemp()
def _a ( self , **A_ ) -> List[Any]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **A_ )
def _a ( self , **A_ ) -> Dict:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **A_ )
def _a ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> str:
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase =ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
def _a ( self ) -> int:
__UpperCamelCase =ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__UpperCamelCase =self.get_feature_extractor(do_normalize=A_ , padding_value=1.0 )
__UpperCamelCase =ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase =floats_list((3, 1000) )
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' )
__UpperCamelCase =processor(audios=A_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ) -> int:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase ='This is a test string'
__UpperCamelCase =processor(text=A_ )
__UpperCamelCase =tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCamelCase =processor.batch_decode(A_ )
__UpperCamelCase =tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
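# The first two processor input names come from the tokenizer (input_ids, attention_mask); the remainder must match the feature extractor's.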
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
| 682 | 0 |
'''simple docstring'''
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCamelCase =SqlDatasetReader(
'dataset' , 'sqlite:///' + sqlite_path , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_sql_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@require_sqlalchemy
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__UpperCamelCase =features.copy() if features else default_expected_features
__UpperCamelCase =(
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCamelCase =SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_sql_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ):
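# Yield every row of the 'dataset' table so two SQLite files can be compared row by row.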
with contextlib.closing(sqlitea.connect(SCREAMING_SNAKE_CASE__ ) ) as con:
__UpperCamelCase =con.cursor()
cur.execute('SELECT * FROM dataset' )
for row in cur:
yield row
@require_sqlalchemy
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] ):
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase =os.path.join(SCREAMING_SNAKE_CASE__ , 'tmp.sql' )
__UpperCamelCase =SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
SqlDatasetWriter(SCREAMING_SNAKE_CASE__ , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=1 ).write()
__UpperCamelCase =iter_sql_file(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =iter_sql_file(SCREAMING_SNAKE_CASE__ )
for rowa, rowa in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
assert rowa == rowa
@require_sqlalchemy
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple ):
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase =os.path.join(SCREAMING_SNAKE_CASE__ , 'tmp.sql' )
__UpperCamelCase =SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
SqlDatasetWriter(SCREAMING_SNAKE_CASE__ , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=2 ).write()
__UpperCamelCase =iter_sql_file(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =iter_sql_file(SCREAMING_SNAKE_CASE__ )
for rowa, rowa in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
assert rowa == rowa
@require_sqlalchemy
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase =os.path.join(SCREAMING_SNAKE_CASE__ , 'tmp.sql' )
__UpperCamelCase =SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
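# Writing with num_proc=0 is invalid (the number of processes must be positive) and is expected to raise.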
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
SqlDatasetWriter(SCREAMING_SNAKE_CASE__ , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=0 ).write()
| 700 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
if subparsers is not None:
__UpperCamelCase =subparsers.add_parser('test' )
else:
__UpperCamelCase =argparse.ArgumentParser('Accelerate test command' )
parser.add_argument(
'--config_file' , default=SCREAMING_SNAKE_CASE__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
return parser
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any ):
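# Resolve the end-to-end test script that ships with accelerate (test_utils/scripts/test_script.py).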
__UpperCamelCase =os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
if args.config_file is None:
__UpperCamelCase =script_name
else:
__UpperCamelCase =F'--config_file={args.config_file} {script_name}'
__UpperCamelCase =['accelerate-launch'] + test_args.split()
__UpperCamelCase =execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=os.environ.copy() )
if result.returncode == 0:
print('Test is a success! You are ready for your distributed training!' )
def _UpperCAmelCase ( ):
__UpperCamelCase =test_command_parser()
__UpperCamelCase =parser.parse_args()
test_command(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
| 682 | 0 |
from typing import Dict
from .base import GenericTensor, Pipeline
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def _a ( self , A_=None , A_=None , A_=None , **A_ ) -> int:
if tokenize_kwargs is None:
__UpperCamelCase ={}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
__UpperCamelCase =truncation
__UpperCamelCase =tokenize_kwargs
__UpperCamelCase ={}
if return_tensors is not None:
__UpperCamelCase =return_tensors
return preprocess_params, {}, postprocess_params
def _a ( self , A_ , **A_ ) -> Dict[str, GenericTensor]:
__UpperCamelCase =self.framework
__UpperCamelCase =self.tokenizer(A_ , return_tensors=A_ , **A_ )
return model_inputs
def _a ( self , A_ ) -> Dict:
__UpperCamelCase =self.model(**A_ )
return model_outputs
def _a ( self , A_ , A_=False ) -> int:
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self , *A_ , **A_ ) -> Any:
return super().__call__(*A_ , **A_ )
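# Example usage (illustrative sketch):
#   from transformers import pipeline
#   extractor = pipeline('feature-extraction', model='bert-base-uncased')
#   features = extractor('This is a test', return_tensors=False)  # nested Python lists of floats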
| 701 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flatten_dict(SCREAMING_SNAKE_CASE__ )
return flax_params
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase ={}
__UpperCamelCase ={
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
__UpperCamelCase ={
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
__UpperCamelCase ='.'.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
__UpperCamelCase =new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
__UpperCamelCase =new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
__UpperCamelCase =re.sub(r'layers_(\d+)' , r'layer.\1' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =new_key.replace('encoder' , 'encoder.encoder' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
__UpperCamelCase =re.sub(r'layers_(\d+)' , r'layer.\1' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flax_dict[key]
__UpperCamelCase ={}
# convert converted_dict into torch format
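# Flax stores dense kernels as (in_features, out_features) while PyTorch Linear weights are (out_features, in_features), hence the transpose for non-embedding tensors.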
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
__UpperCamelCase =torch.from_numpy(converted_dict[key].T )
else:
__UpperCamelCase =torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : str=False ):
__UpperCamelCase =get_flax_param(SCREAMING_SNAKE_CASE__ )
if not use_large:
__UpperCamelCase =PixaStructVisionConfig()
__UpperCamelCase =PixaStructTextConfig()
else:
__UpperCamelCase =PixaStructVisionConfig(
hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
__UpperCamelCase =PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
__UpperCamelCase =PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =PixaStructForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =rename_and_convert_flax_params(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
__UpperCamelCase =PixaStructImageProcessor()
__UpperCamelCase =PixaStructProcessor(image_processor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
if use_large:
__UpperCamelCase =40_96
__UpperCamelCase =True
# mkdir if needed
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
print('Model saved in {}'.format(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
parser.add_argument('--is_vqa', action='store_true', help='Use large model.')
_A = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 682 | 0 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_A = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
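# _LazyModule defers the imports below until an attribute is actually accessed, keeping the initial import of transformers cheap.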
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 682 | 0 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
_A = get_tests_dir('fixtures/dummy-config.json')
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =0
def _a ( self ) -> List[Any]:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto' ) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =AutoConfig.from_pretrained('bert-base-uncased' )
self.assertIsInstance(A_ , A_ )
def _a ( self ) -> Dict:
__UpperCamelCase =AutoConfig.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =AutoConfig.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =AutoConfig.for_model('roberta' )
self.assertIsInstance(A_ , A_ )
def _a ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
__UpperCamelCase =os.path.join(A_ , 'fake-roberta' )
os.makedirs(A_ , exist_ok=A_ )
with open(os.path.join(A_ , 'config.json' ) , 'w' ) as f:
f.write(json.dumps({} ) )
__UpperCamelCase =AutoConfig.from_pretrained(A_ )
self.assertEqual(type(A_ ) , A_ )
def _a ( self ) -> Union[str, Any]:
try:
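# Register a custom config under a new model type; the finally block removes it again so other tests are unaffected.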
AutoConfig.register('custom' , A_ )
# Wrong model type will raise an error
with self.assertRaises(A_ ):
AutoConfig.register('model' , A_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A_ ):
AutoConfig.register('bert' , A_ )
# Now that the config is registered, it can be used as any other config with the auto-API
__UpperCamelCase =CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A_ )
__UpperCamelCase =AutoConfig.from_pretrained(A_ )
self.assertIsInstance(A_ , A_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _a ( self ) -> Union[str, Any]:
with self.assertRaisesRegex(
A_ , 'bert-base is not a local folder and is not a valid model identifier' ):
__UpperCamelCase =AutoConfig.from_pretrained('bert-base' )
def _a ( self ) -> List[str]:
with self.assertRaisesRegex(
A_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
__UpperCamelCase =AutoConfig.from_pretrained(A_ , revision='aaaaaa' )
def _a ( self ) -> Union[str, Any]:
with self.assertRaisesRegex(
A_ , 'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.' , ):
__UpperCamelCase =AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo' )
def _a ( self ) -> Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(A_ ):
__UpperCamelCase =AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(A_ ):
__UpperCamelCase =AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=A_ )
__UpperCamelCase =AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=A_ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A_ )
__UpperCamelCase =AutoConfig.from_pretrained(A_ , trust_remote_code=A_ )
self.assertEqual(reloaded_config.__class__.__name__ , 'NewModelConfig' )
def _a ( self ) -> str:
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = "new-model"
try:
AutoConfig.register('new-model' , A_ )
# If remote code is not set, the default is to use local
__UpperCamelCase =AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote code is disabled, we load the local one.
__UpperCamelCase =AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=A_ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote is enabled, we load from the Hub
__UpperCamelCase =AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=A_ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 703 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Tuple:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =99
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =37
__UpperCamelCase ='gelu'
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase =None
def _a ( self ) -> Tuple:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)
        print(output[:, :3, :3])
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    """simple docstring"""
    tolerance = 1e-4
    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)
    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    """simple docstring"""
    tolerance = 1e-4
    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]
        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )
        expected_query = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
        expected_key = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key, atol=self.tolerance)
| 682 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch '
            'helper utility that will spawn up '
            'multiple distributed processes'
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).')
    # positional
    parser.add_argument(
        'training_script',
        type=str,
        help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ),
    )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
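# Example invocation (paths and hyperparameters below are placeholders, shown
# only to illustrate how the argparse wiring above is used): the launcher
# imports the training script as a module, patches sys.argv, and lets
# torch_xla fork one process per TPU core via xmp.spawn.
#
#   python xla_spawn.py --num_cores 8 train.py --learning_rate 3e-5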
| 704 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
"""simple docstring"""
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(A_ , self.__components ) ) + ")"
    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")
    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")
    @overload
    def __mul__(self, other: float) -> Vector:
        ...
    @overload
    def __mul__(self, other: Vector) -> float:
        ...
    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")
    def copy(self) -> Vector:
        return Vector(self.__components)
    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")
    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value
    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))
    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert isinstance(x, Vector) and isinstance(y, Vector) and isinstance(scalar, (int, float))
    return x * scalar + y
def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
"""simple docstring"""
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h
def __str__( self ) -> str:
        ans = ""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")
    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")
    @overload
    def __mul__(self, other: float) -> Matrix:
        ...
    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...
    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None
    def height(self) -> int:
        return self.__height
    def width(self) -> int:
        return self.__width
    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")
    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")
    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()
    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")
    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
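# Minimal usage sketch for the Vector/Matrix classes above; the values are
# chosen purely for illustration and this block is an addition, not part of
# the original library.
if __name__ == "__main__":
    u = Vector([1, 2, 3])
    v = Vector([3, 2, 1])
    print(u + v)  # (4,4,4)
    print(u * v)  # dot product -> 10
    identity = Matrix([[1, 0], [0, 1]], 2, 2)
    print(identity.determinant())  # 1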
| 682 | 0 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f'{class_data_dir}/images', exist_ok=True)
    if len(list(Path(f'{class_data_dir}/images').iterdir())) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url='https://knn.laion.ai/knn-service',
                indice_name='laion_400m',
                num_images=num_images,
                aesthetic_weight=0.1,
            )
    count = 0
    total = 0
    pbar = tqdm(desc='downloading real regularization images', total=num_class_images)
    with open(f'{class_data_dir}/caption.txt', 'w') as f1, open(f'{class_data_dir}/urls.txt', 'w') as f2, open(
        f'{class_data_dir}/images.txt', 'w'
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['url'])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate the payload is an image
                    with open(f'{class_data_dir}/images/{total}.jpg', 'wb') as f:
                        f.write(img.content)
                    f1.write(images['caption'] + '\n')
                    f2.write(images['url'] + '\n')
                    f3.write(f'{class_data_dir}/images/{total}.jpg' + '\n')
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser('', add_help=False)
    parser.add_argument('--class_prompt', help='text prompt to retrieve images', required=True, type=str)
    parser.add_argument('--class_data_dir', help='path to save images', required=True, type=str)
    parser.add_argument('--num_class_images', help='number of images to download', default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
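# Example invocation (prompt and directory are placeholders):
#
#   python retrieve.py --class_prompt "photo of a cat" \
#       --class_data_dir ./real_reg/cat --num_class_images 200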
| 705 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class M2M100Tokenizer(PreTrainedTokenizer):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or []
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        self.encoder_size = len(self.encoder)
        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)
        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])
    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
def _a ( self , A_ , A_ = "en" , A_ = None , A_ = "ro" , **A_ , ) -> BatchEncoding:
__UpperCamelCase =src_lang
__UpperCamelCase =tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(A_ , A_ , **A_ )
def _a ( self , A_ , A_ , A_ , **A_ ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__UpperCamelCase =src_lang
__UpperCamelCase =self(A_ , add_special_tokens=A_ , **A_ )
__UpperCamelCase =self.get_lang_id(A_ )
__UpperCamelCase =tgt_lang_id
return inputs
    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]
    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
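# Usage sketch (an assumption, not part of this module: it presumes network
# access to the Hugging Face Hub and that this class is exposed as
# transformers.M2M100Tokenizer):
#
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   model_inputs = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
#
# set_src_lang_special_tokens() above is what prepends the __en__ language
# token and appends </s> to every encoded sequence.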
| 682 | 0 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    # prepare kernel
    # the kernel size have to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
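# Quick sanity check (mirrors the parameters used in the demo below; note
# that the even kernel size 10 is bumped to the next odd value, 11):
#
#   kernel = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
#   assert kernel.shape == (11, 11)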
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread('../image_data/lena.jpg')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
| 706 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    to_find = original_name.split('.')[0]
    key_list = key.split('.')
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f'{orig_block_num}.{layer_num}.{original_name}', f'block.{new_block_num}.{layer_num}.{new_name}')
    return key
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('network'):
            key = key.replace('network', 'poolformer.encoder')
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('bias') and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('proj')]
            key = key.replace(to_replace, f'patch_embeddings.{total_embed_found}.')
            key = key.replace('proj', 'projection')
            if key.endswith('bias'):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = 'poolformer.encoder.' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc1', 'output.conv1')
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc2', 'output.conv2')
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm1', 'before_norm')
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm2', 'after_norm')
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_1', 'layer_scale_1')
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_2', 'layer_scale_2')
        if "head" in key:
            key = key.replace('head', 'classifier')
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    size = model_name[-3:]
    config.num_labels = 1000
    filename = 'imagenet-1k-id2label.json'
    expected_shape = (1, 1000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f'Size {size} not supported')
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values
    logger.info(f'Converting model {model_name}...')
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    # rename keys
    state_dict = rename_keys(state_dict)
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors='pt').pixel_values
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f'Size {size} not supported')
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)
    # finally, save model and image processor
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
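# Example invocation (script name and checkpoint path are placeholders):
#
#   python convert_poolformer_checkpoint.py --model_name poolformer_s12 \
#       --checkpoint_path ./poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer_s12_hf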
| 682 | 0 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
RADIUS = 6_378_137
def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
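# Worked example (approximate San Francisco -> New York coordinates):
#
#   haversine_distance(37.774856, -122.424227, 40.713019, -74.012647)
#
# returns roughly 4.1e6 meters (about 4,100 km). The haversine formula
# treats the Earth as a perfect sphere of radius RADIUS, so it only
# approximates the true ellipsoidal distance.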
if __name__ == "__main__":
import doctest
doctest.testmod()
| 682 | 0 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason='RegNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason='RegNet does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1), msg=f'Parameter {name} of model {model_class} seems not properly initialized'
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0), msg=f'Parameter {name} of model {model_class} seems not properly initialized'
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2]
            )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 708 |
def xnor_gate(input_a: int, input_b: int) -> int:
    return 1 if input_a == input_b else 0
def test_xnor_gate() -> None:
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
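# Truth table implemented above: XNOR is the negation of XOR, i.e. the
# output is 1 exactly when both inputs agree.
#
#   input_a  input_b  output
#      0        0       1
#      0        1       0
#      1        0       0
#      1        1       1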
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 682 | 0 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result
def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index]
        )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
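# For nums = [3, 34, 4, 12, 5, 2] and max_sum = 9 the search above should
# print the two qualifying subsets: [3, 4, 2] [4, 5]. The second pruning
# condition (the remaining sum is too small to ever reach max_sum) is what
# keeps the state-space tree from being explored exhaustively.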
| 709 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
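# Usage example: the recursion checks both ends of the range at once, so a
# key is found (or ruled out) in at most about len(list_data) / 2 + 1 calls.
#
#   search([1, 5, 7, 9], 7)   # -> 2
#   search([1, 5, 7, 9], 4)   # -> -1 (not found)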
if __name__ == "__main__":
import doctest
doctest.testmod()
| 682 | 0 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
_A = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
"""simple docstring"""
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-model-flax')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-model-flax-org')
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub('test-model-flax', use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained(f'{USER}/test-model-flax')
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f'{key} not identical')
        # Reset repo
        delete_repo(token=self._token, repo_id='test-model-flax')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id='test-model-flax', push_to_hub=True, use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained(f'{USER}/test-model-flax')
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f'{key} not identical')
def _a ( self ) -> List[Any]:
__UpperCamelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__UpperCamelCase =FlaxBertModel(A_ )
model.push_to_hub('valid_org/test-model-flax-org' , use_auth_token=self._token )
__UpperCamelCase =FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
__UpperCamelCase =flatten_dict(unfreeze(model.params ) )
__UpperCamelCase =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__UpperCamelCase =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(A_ , 1E-3 , msg=f'{key} not identical' )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-model-flax-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
A_ , repo_id='valid_org/test-model-flax-org' , push_to_hub=A_ , use_auth_token=self._token )
__UpperCamelCase =FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
__UpperCamelCase =flatten_dict(unfreeze(model.params ) )
__UpperCamelCase =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__UpperCamelCase =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(A_ , 1E-3 , msg=f'{key} not identical' )
def check_models_equal( modela , modelb ):
    models_are_equal = True
    flat_params_a = flatten_dict(modela.params)
    flat_params_b = flatten_dict(modelb.params)
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key])) > 1E-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Any:
__UpperCamelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
__UpperCamelCase =FlaxBertModel(A_ )
__UpperCamelCase ='bert'
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(A_ , A_ ) )
with self.assertRaises(A_ ):
__UpperCamelCase =FlaxBertModel.from_pretrained(A_ )
__UpperCamelCase =FlaxBertModel.from_pretrained(A_ , subfolder=A_ )
self.assertTrue(check_models_equal(A_ , A_ ) )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
__UpperCamelCase =FlaxBertModel(A_ )
__UpperCamelCase ='bert'
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(A_ , A_ ) , max_shard_size='10KB' )
with self.assertRaises(A_ ):
__UpperCamelCase =FlaxBertModel.from_pretrained(A_ )
__UpperCamelCase =FlaxBertModel.from_pretrained(A_ , subfolder=A_ )
self.assertTrue(check_models_equal(A_ , A_ ) )
def _a ( self ) -> Optional[int]:
__UpperCamelCase ='bert'
__UpperCamelCase ='hf-internal-testing/tiny-random-bert-subfolder'
with self.assertRaises(A_ ):
__UpperCamelCase =FlaxBertModel.from_pretrained(A_ )
__UpperCamelCase =FlaxBertModel.from_pretrained(A_ , subfolder=A_ )
self.assertIsNotNone(A_ )
def _a ( self ) -> Any:
__UpperCamelCase ='bert'
__UpperCamelCase ='hf-internal-testing/tiny-random-bert-sharded-subfolder'
with self.assertRaises(A_ ):
__UpperCamelCase =FlaxBertModel.from_pretrained(A_ )
__UpperCamelCase =FlaxBertModel.from_pretrained(A_ , subfolder=A_ )
self.assertIsNotNone(A_ )
| 710 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , ) -> List[Any]:
__UpperCamelCase =size if size is not None else {'height': 18, 'width': 18}
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =num_channels
__UpperCamelCase =image_size
__UpperCamelCase =min_resolution
__UpperCamelCase =max_resolution
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =apply_ocr
def _a ( self ) -> Tuple:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =LayoutLMvaImageProcessingTester(self )
@property
def _a ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'apply_ocr' ) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def _a ( self ) -> Dict:
pass
def _a ( self ) -> Optional[Any]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , A_ )
self.assertIsInstance(encoding.boxes , A_ )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> int:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> List[str]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> Any:
# with apply_OCR = True
__UpperCamelCase =LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__UpperCamelCase =Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
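        # With apply_ocr=True (the default), the processor runs Tesseract on the
        # image and returns the recognized words together with their bounding
        # boxes, normalized to a 0-1000 coordinate grid (one box per word below).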
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A_ )
self.assertListEqual(encoding.boxes , A_ )
# with apply_OCR = False
__UpperCamelCase =LayoutLMvaImageProcessor(apply_ocr=A_ )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 682 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_A = 16
_A = 32
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Accelerator , SCREAMING_SNAKE_CASE__ : int = 16 ):
__UpperCamelCase =AutoTokenizer.from_pretrained('bert-base-cased' )
__UpperCamelCase =load_dataset('glue' , 'mrpc' )
def tokenize_function(SCREAMING_SNAKE_CASE__ : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
__UpperCamelCase =tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__UpperCamelCase =datasets.map(
SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__UpperCamelCase =tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(SCREAMING_SNAKE_CASE__ : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__UpperCamelCase =1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
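        # (Rounding pad lengths this way is a throughput optimization: NVIDIA
        # tensor cores are fastest when sequence dimensions are multiples of 8,
        # and fp8 kernels prefer multiples of 16.)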
if accelerator.mixed_precision == "fp8":
__UpperCamelCase =16
elif accelerator.mixed_precision != "no":
__UpperCamelCase =8
else:
__UpperCamelCase =None
return tokenizer.pad(
SCREAMING_SNAKE_CASE__ , padding='longest' , max_length=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_tensors='pt' , )
# Instantiate dataloaders.
__UpperCamelCase =DataLoader(
tokenized_datasets['train'] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =DataLoader(
tokenized_datasets['validation'] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_A = mocked_dataloaders # noqa: F811
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , SCREAMING_SNAKE_CASE__ ) == "1":
__UpperCamelCase =2
# New Code #
__UpperCamelCase =int(args.gradient_accumulation_steps )
# Initialize accelerator
__UpperCamelCase =Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=SCREAMING_SNAKE_CASE__ )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCamelCase =config['lr']
__UpperCamelCase =int(config['num_epochs'] )
__UpperCamelCase =int(config['seed'] )
__UpperCamelCase =int(config['batch_size'] )
__UpperCamelCase =evaluate.load('glue' , 'mrpc' )
set_seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase , __UpperCamelCase =get_dataloaders(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCamelCase =AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=SCREAMING_SNAKE_CASE__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__UpperCamelCase =model.to(accelerator.device )
# Instantiate optimizer
__UpperCamelCase =AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE__ )
# Instantiate scheduler
__UpperCamelCase =get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE__ , num_warmup_steps=1_00 , num_training_steps=(len(SCREAMING_SNAKE_CASE__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =accelerator.prepare(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE__ ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
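            # Inside `accumulate`, Accelerate tracks how many batches have been
            # seen and only synchronizes gradients (and lets `optimizer.step()`
            # take effect) every `gradient_accumulation_steps` batches; on the
            # intermediate batches the gradients simply keep accumulating.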
with accelerator.accumulate(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =model(**SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =output.loss
accelerator.backward(SCREAMING_SNAKE_CASE__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__UpperCamelCase =model(**SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =outputs.logits.argmax(dim=-1 )
__UpperCamelCase , __UpperCamelCase =accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ , )
__UpperCamelCase =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( ):
__UpperCamelCase =argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
        'and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
        '--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE__ , default=1 , help='The number of minibatches to accumulate gradients over before stepping the optimizer.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
__UpperCamelCase =parser.parse_args()
__UpperCamelCase ={'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
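# Typical invocation (the script file name here is illustrative):
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2 --mixed_precision fp16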
| 711 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_A = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : bool = field(default=A_ , metadata={"help": "Whether to use SortishSampler or not."} )
UpperCAmelCase__ : bool = field(
default=A_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=A_ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def _a ( self ) -> Dict:
__UpperCamelCase =super().to_dict()
for k, v in d.items():
if isinstance(A_ , A_ ):
__UpperCamelCase =v.to_dict()
return d
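# Illustrative usage of the arguments class above (in transformers it is
# `Seq2SeqTrainingArguments`; the values shown are hypothetical):
#   args = Seq2SeqTrainingArguments(
#       output_dir="out", predict_with_generate=True,
#       generation_max_length=128, generation_num_beams=4,
#   )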
| 682 | 0 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCAmelCase__ :
"""simple docstring"""
@property
def _a ( self ) -> Dict:
return self.get_dummy_input()
@property
def _a ( self ) -> Dict:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.' )
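    # The expected shapes above follow standard UNet topology: "down" blocks halve
    # the 32x32 spatial input to 16x16, "mid" blocks preserve it, and "up" blocks
    # double it to 64x64, while the channel count stays at 32 in this test setup.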
def _a ( self , A_=True , A_=False , A_=False , A_=False , ) -> str:
__UpperCamelCase =4
__UpperCamelCase =32
__UpperCamelCase =(32, 32)
__UpperCamelCase =torch.manual_seed(0 )
__UpperCamelCase =torch.device(A_ )
__UpperCamelCase =(batch_size, num_channels) + sizes
__UpperCamelCase =randn_tensor(A_ , generator=A_ , device=A_ )
__UpperCamelCase ={'hidden_states': hidden_states}
if include_temb:
__UpperCamelCase =128
__UpperCamelCase =randn_tensor((batch_size, temb_channels) , generator=A_ , device=A_ )
if include_res_hidden_states_tuple:
__UpperCamelCase =torch.manual_seed(1 )
__UpperCamelCase =(randn_tensor(A_ , generator=A_ , device=A_ ),)
if include_encoder_hidden_states:
__UpperCamelCase =floats_tensor((batch_size, 32, 32) ).to(A_ )
if include_skip_sample:
__UpperCamelCase =randn_tensor(((batch_size, 3) + sizes) , generator=A_ , device=A_ )
return dummy_input
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ={
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
__UpperCamelCase =32
if self.block_type == "mid":
init_dict.pop('out_channels' )
__UpperCamelCase =self.dummy_input
return init_dict, inputs_dict
def _a ( self , A_ ) -> int:
__UpperCamelCase , __UpperCamelCase =self.prepare_init_args_and_inputs_for_common()
__UpperCamelCase =self.block_class(**A_ )
unet_block.to(A_ )
unet_block.eval()
with torch.no_grad():
__UpperCamelCase =unet_block(**A_ )
if isinstance(A_ , A_ ):
__UpperCamelCase =output[0]
self.assertEqual(output.shape , self.output_shape )
__UpperCamelCase =output[0, -1, -3:, -3:]
__UpperCamelCase =torch.tensor(A_ ).to(A_ )
assert torch_all_close(output_slice.flatten() , A_ , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase , __UpperCamelCase =self.prepare_init_args_and_inputs_for_common()
__UpperCamelCase =self.block_class(**A_ )
model.to(A_ )
model.train()
__UpperCamelCase =model(**A_ )
if isinstance(A_ , A_ ):
__UpperCamelCase =output[0]
__UpperCamelCase =torch.device(A_ )
__UpperCamelCase =randn_tensor(output.shape , device=A_ )
__UpperCamelCase =torch.nn.functional.mse_loss(A_ , A_ )
loss.backward()
| 712 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Dict = "blip_text_model"
def __init__( self , A_=30524 , A_=768 , A_=768 , A_=3072 , A_=768 , A_=12 , A_=8 , A_=512 , A_="gelu" , A_=1E-12 , A_=0.0 , A_=0.0 , A_=0.02 , A_=30522 , A_=2 , A_=0 , A_=102 , A_=True , A_=True , **A_ , ) -> Optional[int]:
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , sep_token_id=A_ , **A_ , )
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =encoder_hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =is_decoder
__UpperCamelCase =use_cache
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "blip_vision_model"
def __init__( self , A_=768 , A_=3072 , A_=512 , A_=12 , A_=12 , A_=384 , A_=16 , A_="gelu" , A_=1E-5 , A_=0.0 , A_=1E-10 , **A_ , ) -> Optional[Any]:
super().__init__(**A_ )
__UpperCamelCase =hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =patch_size
__UpperCamelCase =image_size
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_dropout
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : int = "blip"
UpperCAmelCase__ : Optional[int] = True
def __init__( self , A_=None , A_=None , A_=512 , A_=2.6592 , A_=256 , **A_ , ) -> Union[str, Any]:
super().__init__(**A_ )
if text_config is None:
__UpperCamelCase ={}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
if vision_config is None:
__UpperCamelCase ={}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
__UpperCamelCase =BlipTextConfig(**A_ )
__UpperCamelCase =BlipVisionConfig(**A_ )
__UpperCamelCase =self.vision_config.hidden_size
__UpperCamelCase =projection_dim
__UpperCamelCase =logit_scale_init_value
__UpperCamelCase =1.0
__UpperCamelCase =0.02
__UpperCamelCase =image_text_hidden_size
@classmethod
def _a ( cls , A_ , A_ , **A_ ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =copy.deepcopy(self.__dict__ )
__UpperCamelCase =self.text_config.to_dict()
__UpperCamelCase =self.vision_config.to_dict()
__UpperCamelCase =self.__class__.model_type
return output
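# Illustrative composition of the three configs above (the transformers names are
# BlipTextConfig / BlipVisionConfig / BlipConfig; the values are hypothetical):
#   text_cfg = BlipTextConfig(vocab_size=30524, hidden_size=768)
#   vision_cfg = BlipVisionConfig(image_size=384, patch_size=16)
#   blip_cfg = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg)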
| 682 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A : Optional[Any] = logging.get_logger(__name__)
_A : Dict = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : str = "deta"
UpperCAmelCase__ : List[Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , A_=None , A_=900 , A_=2048 , A_=6 , A_=2048 , A_=8 , A_=6 , A_=1024 , A_=8 , A_=0.0 , A_=True , A_="relu" , A_=256 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=1.0 , A_=True , A_=False , A_="sine" , A_=5 , A_=4 , A_=4 , A_=True , A_=300 , A_=True , A_=True , A_=1 , A_=5 , A_=2 , A_=1 , A_=1 , A_=5 , A_=2 , A_=0.1 , A_=0.25 , **A_ , ) -> int:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
__UpperCamelCase =CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'] )
else:
if isinstance(A_ , A_ ):
__UpperCamelCase =backbone_config.pop('model_type' )
__UpperCamelCase =CONFIG_MAPPING[backbone_model_type]
__UpperCamelCase =config_class.from_dict(A_ )
__UpperCamelCase =backbone_config
__UpperCamelCase =num_queries
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =d_model
__UpperCamelCase =encoder_ffn_dim
__UpperCamelCase =encoder_layers
__UpperCamelCase =encoder_attention_heads
__UpperCamelCase =decoder_ffn_dim
__UpperCamelCase =decoder_layers
__UpperCamelCase =decoder_attention_heads
__UpperCamelCase =dropout
__UpperCamelCase =attention_dropout
__UpperCamelCase =activation_dropout
__UpperCamelCase =activation_function
__UpperCamelCase =init_std
__UpperCamelCase =init_xavier_std
__UpperCamelCase =encoder_layerdrop
__UpperCamelCase =auxiliary_loss
__UpperCamelCase =position_embedding_type
# deformable attributes
__UpperCamelCase =num_feature_levels
__UpperCamelCase =encoder_n_points
__UpperCamelCase =decoder_n_points
__UpperCamelCase =two_stage
__UpperCamelCase =two_stage_num_proposals
__UpperCamelCase =with_box_refine
__UpperCamelCase =assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
__UpperCamelCase =class_cost
__UpperCamelCase =bbox_cost
__UpperCamelCase =giou_cost
# Loss coefficients
__UpperCamelCase =mask_loss_coefficient
__UpperCamelCase =dice_loss_coefficient
__UpperCamelCase =bbox_loss_coefficient
__UpperCamelCase =giou_loss_coefficient
__UpperCamelCase =eos_coefficient
__UpperCamelCase =focal_alpha
super().__init__(is_encoder_decoder=A_ , **A_ )
@property
def _a ( self ) -> int:
return self.encoder_attention_heads
@property
def _a ( self ) -> int:
return self.d_model
def _a ( self ) -> Dict:
__UpperCamelCase =copy.deepcopy(self.__dict__ )
__UpperCamelCase =self.backbone_config.to_dict()
__UpperCamelCase =self.__class__.model_type
return output
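# Illustrative usage (transformers names; note the constraint enforced in __init__
# above: two_stage=True requires with_box_refine=True):
#   backbone = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
#   config = DetaConfig(backbone_config=backbone, two_stage=True, with_box_refine=True)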
| 713 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = RoCBertTokenizer
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : int = filter_non_english
def _a ( self ) -> Optional[Any]:
super().setUp()
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
__UpperCamelCase ={}
__UpperCamelCase ={}
for i, value in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =i
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(A_ , ['你', '好', '[SEP]', '你', '是', '谁'] )
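        # RoCBert represents each token with three parallel id streams (token ids,
        # glyph/shape ids, and pronunciation ids); the toy lookup tables built in
        # setUp are identical, so the three assertions below expect the same ids.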
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Any:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__UpperCamelCase ={}
for i, token in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =RoCBertWordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _a ( self ) -> Dict:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _a ( self ) -> Tuple:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _a ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
__UpperCamelCase =self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _a ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
__UpperCamelCase =tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
__UpperCamelCase =tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False
__UpperCamelCase =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _a ( self ) -> List[str]:
__UpperCamelCase =['的', '人', '有']
__UpperCamelCase =''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =True
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =False
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCamelCase =[
f'##{token}' if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
@slow
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.encode('你好' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode('你是谁' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase ='你好,你是谁'
__UpperCamelCase =tokenizer.tokenize(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_shape_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_pronunciation_ids(A_ )
__UpperCamelCase =tokenizer.prepare_for_model(
A_ , A_ , A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode_plus(A_ , add_special_tokens=A_ )
self.assertEqual(A_ , A_ )
| 682 | 0 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
UpperCAmelCase__ : nn.Module
UpperCAmelCase__ : List[nn.Module] = field(default_factory=A_ )
UpperCAmelCase__ : list = field(default_factory=A_ )
def _a ( self , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =len(list(m.modules() ) ) == 1 or isinstance(A_ , nn.Convad ) or isinstance(A_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(A_ )
def __call__( self , A_ ) -> str:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(A_ )
[x.remove() for x in self.handles]
return self
@property
def _a ( self ) -> Optional[Any]:
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda A_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
UpperCAmelCase__ : nn.Module
UpperCAmelCase__ : nn.Module
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : List = field(default_factory=A_ )
UpperCAmelCase__ : List = field(default_factory=A_ )
UpperCAmelCase__ : bool = True
def __call__( self , A_ ) -> List[str]:
__UpperCamelCase =Tracker(self.dest )(A_ ).parametrized
__UpperCamelCase =Tracker(self.src )(A_ ).parametrized
__UpperCamelCase =list(filter(lambda A_ : type(A_ ) not in self.src_skip , A_ ) )
__UpperCamelCase =list(filter(lambda A_ : type(A_ ) not in self.dest_skip , A_ ) )
if len(A_ ) != len(A_ ) and self.raise_if_mismatch:
raise Exception(
f'Numbers of operations are different. Source module has {len(A_ )} operations while'
f' destination module has {len(A_ )}.' )
for dest_m, src_m in zip(A_ , A_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
                print(f'Transferred from={src_m} to={dest_m}' )
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , A_ ) -> Optional[int]:
super().__init__()
__UpperCamelCase =[]
# - get the stem
feature_blocks.append(('conv1', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('block' ), f'Unexpected layer name {k}'
__UpperCamelCase =len(A_ ) + 1
feature_blocks.append((f'res{block_index}', v) )
__UpperCamelCase =nn.ModuleDict(A_ )
def _a ( self , A_ ) -> int:
return get_trunk_forward_outputs(
A_ , out_feat_keys=A_ , feature_blocks=self._feature_blocks , )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def _a ( self , A_ ) -> str:
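        # e.g. "regnet-y-040" -> "regnety_040", matching timm's model-name convention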
__UpperCamelCase =x.split('-' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self , A_ ) -> Callable[[], Tuple[nn.Module, Dict]]:
# default to timm!
if x not in self:
__UpperCamelCase =self.convert_name_to_timm(A_ )
__UpperCamelCase =partial(lambda: (timm.create_model(A_ , pretrained=A_ ).eval(), None) )
else:
__UpperCamelCase =super().__getitem__(A_ )
return val
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __getitem__( self , A_ ) -> Callable[[], nn.Module]:
if "seer" in x and "in1k" not in x:
__UpperCamelCase =RegNetModel
else:
__UpperCamelCase =RegNetForImageClassification
return val
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Tuple[str, str]] ):
for from_key, to_key in keys:
__UpperCamelCase =from_state_dict[from_key].clone()
print(F'Copied key={from_key} to={to_key}' )
return to_state_dict
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Callable[[], nn.Module] , SCREAMING_SNAKE_CASE__ : Callable[[], nn.Module] , SCREAMING_SNAKE_CASE__ : RegNetConfig , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : bool = True , ):
print(F'Converting {name}...' )
with torch.no_grad():
__UpperCamelCase , __UpperCamelCase =from_model_func()
__UpperCamelCase =our_model_func(SCREAMING_SNAKE_CASE__ ).eval()
__UpperCamelCase =ModuleTransfer(src=SCREAMING_SNAKE_CASE__ , dest=SCREAMING_SNAKE_CASE__ , raise_if_mismatch=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =torch.randn((1, 3, 2_24, 2_24) )
module_transfer(SCREAMING_SNAKE_CASE__ )
if from_state_dict is not None:
__UpperCamelCase =[]
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
__UpperCamelCase =[('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
__UpperCamelCase =manually_copy_vissl_head(SCREAMING_SNAKE_CASE__ , our_model.state_dict() , SCREAMING_SNAKE_CASE__ )
our_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =our_model(SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =(
our_outputs.logits if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else our_outputs.last_hidden_state
)
__UpperCamelCase =from_model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =from_output[-1] if type(SCREAMING_SNAKE_CASE__ ) is list else from_output
    # now since I don't want to use any config files, the vissl seer model doesn't actually have a head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
__UpperCamelCase =our_outputs.hidden_states[-1]
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=SCREAMING_SNAKE_CASE__ , )
__UpperCamelCase =2_24 if 'seer' not in name else 3_84
# we can use the convnext one
__UpperCamelCase =AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=SCREAMING_SNAKE_CASE__ )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=SCREAMING_SNAKE_CASE__ , )
print(F'Pushed {name}' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = True ):
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =10_00
__UpperCamelCase =(1, num_labels)
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =num_labels
__UpperCamelCase =json.load(open(cached_download(hf_hub_url(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) ) , 'r' ) )
__UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
__UpperCamelCase =partial(SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase ={
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 1_52, 3_68] , groups_width=8 , layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 1_60, 3_84] , groups_width=16 , layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 2_40, 5_28] , groups_width=24 , layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 1_28, 2_88, 6_72] , groups_width=16 , layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 1_68, 4_08, 9_12] , groups_width=24 , layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
            depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type='x'),
        'regnet-x-040': ImageNetPreTrainedConfig(
            depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type='x'),
        'regnet-x-064': ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type='x'),
        'regnet-x-080': ImageNetPreTrainedConfig(
            depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type='x'),
        'regnet-x-120': ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type='x'),
        'regnet-x-160': ImageNetPreTrainedConfig(
            depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type='x'),
        'regnet-x-320': ImageNetPreTrainedConfig(
            depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type='x'),
        # y variant
        'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8),
        'regnet-y-004': ImageNetPreTrainedConfig(
            depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8),
        'regnet-y-006': ImageNetPreTrainedConfig(
            depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16),
        'regnet-y-008': ImageNetPreTrainedConfig(
            depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16),
        'regnet-y-016': ImageNetPreTrainedConfig(
            depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24),
        'regnet-y-032': ImageNetPreTrainedConfig(
            depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24),
        'regnet-y-040': ImageNetPreTrainedConfig(
            depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64),
        'regnet-y-064': ImageNetPreTrainedConfig(
            depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72),
        'regnet-y-080': ImageNetPreTrainedConfig(
            depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56),
        'regnet-y-120': ImageNetPreTrainedConfig(
            depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112),
        'regnet-y-160': ImageNetPreTrainedConfig(
            depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112),
        'regnet-y-320': ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        # models created by SEER -> https://arxiv.org/abs/2202.08360
        'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328),
        'regnet-y-1280-seer': RegNetConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264),
        'regnet-y-2560-seer': RegNetConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640),
        'regnet-y-10b-seer': ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010),
        # finetuned on imagenet
        'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
            depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328),
        'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264),
        'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
            depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640),
        'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010),
    }
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location='cpu')
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files['classy_state_dict']['base_model']['model']
        state_dict = model_state_dict['trunk']
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict['heads']
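    # Layout assumed for the classy-vision / VISSL checkpoints loaded above (an
    # assumption of this script, inferred from the lookups in the loader):
    #   {'classy_state_dict': {'base_model': {'model': {'trunk': ..., 'heads': ...}}}}
    # 'trunk' carries the backbone weights; 'heads' the optional classifier head.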
    # pretrained
    names_to_from_model_map['regnet-y-320-seer'] = partial(
        load_using_classy_vision,
        'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch',
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map['regnet-y-640-seer'] = partial(
        load_using_classy_vision,
        'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch',
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map['regnet-y-1280-seer'] = partial(
        load_using_classy_vision,
        'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch',
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map['regnet-y-10b-seer'] = partial(
        load_using_classy_vision,
        'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch',
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )
    # IN1K finetuned
    names_to_from_model_map['regnet-y-320-seer-in1k'] = partial(
        load_using_classy_vision,
        'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch',
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map['regnet-y-640-seer-in1k'] = partial(
        load_using_classy_vision,
        'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch',
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map['regnet-y-1280-seer-in1k'] = partial(
        load_using_classy_vision,
        'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch',
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map['regnet-y-10b-seer-in1k'] = partial(
        load_using_classy_vision,
        'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch',
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )
    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            config,
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
            'The name of the model you wish to convert. It must be one of the supported regnet* architectures,'
            ' currently: regnetx-*, regnety-*. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
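# Example invocation (hypothetical filename and output path; the real script name
# is whatever this module is saved as, and model names are the keys of
# `names_to_config` above):
#   python convert_regnet_to_pytorch.py \
#       --model_name regnet-y-320-seer \
#       --pytorch_dump_folder_path ./converted_regnets
# Leaving --model_name unset converts (and optionally pushes) every entry.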
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates nested lists of random floats with dimensions shape[0] x shape[1]."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
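# Shape sketch for the helper above (values are random; shown only to illustrate
# the structure): floats_list((2, 3)) -> [[r, r, r], [r, r, r]], each r in [0.0, scale).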
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
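    # With the defaults above (batch_size=7, min_seq_length=400, max_seq_length=2000),
    # seq_length_diff is (2000 - 400) // 6 == 266, so the non-equal-length batch has
    # lengths 400, 666, 932, 1198, 1464, 1730 and 1996, strictly increasing by design.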
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'feat_extract.json')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding='max_length', return_tensors='np').input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors='np').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors='np').input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='np').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='np').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='np').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='np').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors='np').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors='np').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_features': inputs}], return_tensors='np')
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'input_features': inputs}], return_tensors='pt')
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
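    # The assertions above pin down an intentional contract of the feature
    # extractors: double-precision (Python float / float64) inputs are accepted,
    # but padded batches always come back downcast to float32.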
    def _load_datasamples(self, num_samples):
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        # automatic decoding with librispeech
        speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]['audio']
        return [x['array'] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors='pt').input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
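    # For reference, zero_mean_unit_var_norm standardizes each waveform as
    # roughly (x - mean(x)) / sqrt(var(x) + eps), hence the mean ~ 0 and
    # variance ~ 1 assertions even after the deliberately bad rescale above.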