code
stringlengths 82
54.1k
| code_codestyle
int64 0
699
| style_context
stringlengths 111
35.6k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class lowerCamelCase_ :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ):
"""simple docstring"""
__magic_name__ :Optional[int] = parent
__magic_name__ :List[Any] = 1_3
__magic_name__ :Union[str, Any] = 7
__magic_name__ :Optional[Any] = True
__magic_name__ :Tuple = True
__magic_name__ :List[str] = True
__magic_name__ :List[Any] = True
__magic_name__ :int = 9_9
__magic_name__ :Any = 3_2
__magic_name__ :Union[str, Any] = 2
__magic_name__ :List[str] = 4
__magic_name__ :List[Any] = 3_7
__magic_name__ :Tuple = '''gelu'''
__magic_name__ :Any = 0.1
__magic_name__ :str = 0.1
__magic_name__ :List[str] = 5_1_2
__magic_name__ :int = 1_6
__magic_name__ :Any = 2
__magic_name__ :List[Any] = 0.02
__magic_name__ :Optional[Any] = 3
__magic_name__ :Tuple = 4
__magic_name__ :Optional[Any] = None
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ :str = None
if self.use_input_mask:
__magic_name__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ :str = None
if self.use_token_type_ids:
__magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ :Union[str, Any] = None
__magic_name__ :Tuple = None
__magic_name__ :str = None
if self.use_labels:
__magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ :List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ :str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :int = TFRoFormerModel(config=__lowerCAmelCase )
__magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__magic_name__ :List[str] = [input_ids, input_mask]
__magic_name__ :Any = model(__lowerCAmelCase )
__magic_name__ :List[str] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Dict = True
__magic_name__ :List[str] = TFRoFormerForCausalLM(config=__lowerCAmelCase )
__magic_name__ :str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Optional[Any] = model(__lowerCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = TFRoFormerForMaskedLM(config=__lowerCAmelCase )
__magic_name__ :Any = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :int = self.num_labels
__magic_name__ :str = TFRoFormerForSequenceClassification(config=__lowerCAmelCase )
__magic_name__ :Optional[int] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :str = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.num_choices
__magic_name__ :Tuple = TFRoFormerForMultipleChoice(config=__lowerCAmelCase )
__magic_name__ :int = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__magic_name__ :Optional[Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__magic_name__ :Union[str, Any] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__magic_name__ :str = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__magic_name__ :Tuple = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.num_labels
__magic_name__ :Any = TFRoFormerForTokenClassification(config=__lowerCAmelCase )
__magic_name__ :str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :List[str] = TFRoFormerForQuestionAnswering(config=__lowerCAmelCase )
__magic_name__ :List[str] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__magic_name__ :Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) :Union[str, Any] = config_and_inputs
__magic_name__ :Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
a__ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
a__ = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
a__ = False
a__ = False
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = TFRoFormerModelTester(self )
__magic_name__ :List[str] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=3_7 )
def A ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def A ( self ):
"""simple docstring"""
__magic_name__ :int = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
__magic_name__ :Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
__magic_name__ :Optional[Any] = model(__lowerCAmelCase )[0]
# TODO Replace vocab size
__magic_name__ :int = 5_0_0_0_0
__magic_name__ :Tuple = [1, 6, vocab_size]
self.assertEqual(output.shape , __lowerCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__magic_name__ :Any = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 )
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
a__ = 1e-4
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = tf.constant([[4, 1_0]] )
__magic_name__ :Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__magic_name__ :Optional[Any] = emba(input_ids.shape )
__magic_name__ :List[str] = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance )
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
__magic_name__ :Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 )
emba([2, 1_6, 5_1_2] )
__magic_name__ :Optional[int] = emba.weight[:3, :5]
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance )
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
a__ = 1e-4
def A ( self ):
"""simple docstring"""
# 2,12,16,64
__magic_name__ :int = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
__magic_name__ :str = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
__magic_name__ :int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 )
__magic_name__ :List[str] = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :]
__magic_name__ , __magic_name__ :Union[str, Any] = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Tuple = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
__magic_name__ :List[str] = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
| 0 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : complex ,a__ : str = "x" ,a__ : float = 10**-10 ,a__ : int = 1 ,) -> complex:
__A : Tuple = symbols(a__ )
__A : List[str] = lambdify(a__ ,a__ )
__A : Any = lambdify(a__ ,diff(a__ ,a__ ) )
__A : Dict = starting_point
while True:
if diff_function(a__ ) != 0:
__A : Optional[int] = prev_guess - multiplicity * func(a__ ) / diff_function(
a__ )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
__A : List[Any] = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 17 | 0 |
import numpy as np
from PIL import Image
def _A ( _lowercase , _lowercase , _lowercase ) -> np.ndarray:
"""simple docstring"""
__UpperCamelCase = np.array(_lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 0
# compute the shape of the output matrix
__UpperCamelCase = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__UpperCamelCase = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__UpperCamelCase = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__UpperCamelCase = 0
__UpperCamelCase = 0
return updated_arr
def _A ( _lowercase , _lowercase , _lowercase ) -> np.ndarray:
"""simple docstring"""
__UpperCamelCase = np.array(_lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 0
# compute the shape of the output matrix
__UpperCamelCase = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__UpperCamelCase = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__UpperCamelCase = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__UpperCamelCase = 0
__UpperCamelCase = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
__snake_case = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 1 |
from math import sqrt
def __SCREAMING_SNAKE_CASE ( a__ : int = 1000000 ) -> int:
__A : int = 0
__A : int = 0
__A : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(a__ ,sum_shortest_sides // 2 )
- max(1 ,sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
| 17 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["""MobileViTFeatureExtractor"""]
UpperCAmelCase_ = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 2 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase_ ( _lowercase ):
_lowercase : List[str] = ['''pixel_values''']
def __init__( self : Dict , __A : bool = True , __A : Optional[Dict[str, int]] = None , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : bool = True , __A : Dict[str, int] = None , __A : bool = True , __A : Union[int, float] = 1 / 255 , __A : bool = True , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , **__A : int , ):
super().__init__(**__A )
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 256}
__A : Dict = get_size_dict(__A , default_to_square=__A )
__A : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : str = do_resize
__A : Dict = size
__A : Any = resample
__A : Optional[Any] = do_center_crop
__A : List[str] = crop_size
__A : Optional[int] = do_rescale
__A : int = rescale_factor
__A : Union[str, Any] = do_normalize
__A : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__A : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase_ ( self : Optional[Any] , __A : np.ndarray , __A : Dict[str, int] , __A : PILImageResampling = PILImageResampling.BICUBIC , __A : Optional[Union[str, ChannelDimension]] = None , **__A : List[Any] , ):
__A : str = get_size_dict(__A , default_to_square=__A )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__A : Dict = get_resize_output_image_size(__A , size=size["""shortest_edge"""] , default_to_square=__A )
return resize(__A , size=__A , resample=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : np.ndarray , __A : Dict[str, int] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : str , ):
__A : str = get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__A , size=(size["""height"""], size["""width"""]) , data_format=__A , **__A )
def lowerCAmelCase_ ( self : List[str] , __A : np.ndarray , __A : float , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Optional[int] ):
return rescale(__A , scale=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Any , __A : np.ndarray , __A : Union[float, List[float]] , __A : Union[float, List[float]] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Tuple , ):
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : int , __A : ImageInput , __A : Optional[bool] = None , __A : Dict[str, int] = None , __A : PILImageResampling = None , __A : bool = None , __A : Dict[str, int] = None , __A : Optional[bool] = None , __A : Optional[float] = None , __A : Optional[bool] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[str, TensorType]] = None , __A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__A : Optional[int] , ):
__A : List[str] = do_resize if do_resize is not None else self.do_resize
__A : Any = size if size is not None else self.size
__A : Union[str, Any] = get_size_dict(__A , default_to_square=__A )
__A : Tuple = resample if resample is not None else self.resample
__A : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
__A : List[Any] = crop_size if crop_size is not None else self.crop_size
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : Tuple = do_rescale if do_rescale is not None else self.do_rescale
__A : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__A : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
__A : Optional[int] = image_mean if image_mean is not None else self.image_mean
__A : List[str] = image_std if image_std is not None else self.image_std
__A : Union[str, Any] = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__A : Union[str, Any] = [to_numpy_array(__A ) for image in images]
if do_resize:
__A : int = [self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_center_crop:
__A : Optional[Any] = [self.center_crop(image=__A , size=__A ) for image in images]
if do_rescale:
__A : List[Any] = [self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
__A : Any = [self.normalize(image=__A , mean=__A , std=__A ) for image in images]
__A : int = [to_channel_dimension_format(__A , __A ) for image in images]
__A : Tuple = {"""pixel_values""": images}
return BatchFeature(data=__A , tensor_type=__A )
def lowerCAmelCase_ ( self : int , __A : List[str] , __A : List[Tuple] = None ):
__A : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__A ) != len(__A ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(__A ):
__A : str = target_sizes.numpy()
__A : int = []
for idx in range(len(__A ) ):
__A : Any = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__A )
__A : Union[str, Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__A )
else:
__A : List[str] = logits.argmax(dim=1 )
__A : Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 17 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase : Any = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Union[str, Any] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 3 |
class lowerCamelCase_ :
def __init__( self : Dict , __A : Tuple , __A : Optional[int] , __A : int ):
__A : List[str] = name
__A : Optional[int] = value
__A : Optional[Any] = weight
def __repr__( self : Any ):
return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def lowerCAmelCase_ ( self : Union[str, Any] ):
return self.value
def lowerCAmelCase_ ( self : str ):
return self.name
def lowerCAmelCase_ ( self : str ):
return self.weight
def lowerCAmelCase_ ( self : Dict ):
return self.value / self.weight
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : Optional[int] ,a__ : Union[str, Any] ) -> int:
__A : Tuple = []
for i in range(len(a__ ) ):
menu.append(Things(name[i] ,value[i] ,weight[i] ) )
return menu
def __SCREAMING_SNAKE_CASE ( a__ : Tuple ,a__ : Any ,a__ : Optional[int] ) -> Tuple:
__A : Optional[int] = sorted(a__ ,key=a__ ,reverse=a__ )
__A : Optional[Any] = []
__A , __A : Tuple = 0.0, 0.0
for i in range(len(a__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] ):
lowerCAmelCase = args.pruning_method
lowerCAmelCase = args.threshold
lowerCAmelCase = args.model_name_or_path.rstrip('/' )
lowerCAmelCase = args.target_model_path
print(F'Load fine-pruned model from {model_name_or_path}' )
lowerCAmelCase = torch.load(os.path.join(_UpperCAmelCase , 'pytorch_model.bin' ) )
lowerCAmelCase = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
lowerCAmelCase = tensor
print(F'Copied layer {name}' )
elif "classifier" in name or "qa_output" in name:
lowerCAmelCase = tensor
print(F'Copied layer {name}' )
elif "bias" in name:
lowerCAmelCase = tensor
print(F'Copied layer {name}' )
else:
if pruning_method == "magnitude":
lowerCAmelCase = MagnitudeBinarizer.apply(inputs=_UpperCAmelCase , threshold=_UpperCAmelCase )
lowerCAmelCase = tensor * mask
print(F'Pruned layer {name}' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
lowerCAmelCase = name[:-6]
lowerCAmelCase = model[F'{prefix_}mask_scores']
lowerCAmelCase = TopKBinarizer.apply(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = tensor * mask
print(F'Pruned layer {name}' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
lowerCAmelCase = name[:-6]
lowerCAmelCase = model[F'{prefix_}mask_scores']
lowerCAmelCase = ThresholdBinarizer.apply(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = tensor * mask
print(F'Pruned layer {name}' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
lowerCAmelCase = name[:-6]
lowerCAmelCase = model[F'{prefix_}mask_scores']
lowerCAmelCase ,lowerCAmelCase = -0.1, 1.1
lowerCAmelCase = torch.sigmoid(_UpperCAmelCase )
lowerCAmelCase = s * (r - l) + l
lowerCAmelCase = s_bar.clamp(min=0.0 , max=1.0 )
lowerCAmelCase = tensor * mask
print(F'Pruned layer {name}' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
lowerCAmelCase = os.path.join(
os.path.dirname(_UpperCAmelCase ) , F'bertarized_{os.path.basename(_UpperCAmelCase )}' )
if not os.path.isdir(_UpperCAmelCase ):
shutil.copytree(_UpperCAmelCase , _UpperCAmelCase )
print(F'\nCreated folder {target_model_path}' )
torch.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
__UpperCamelCase : List[Any] = parser.parse_args()
main(args)
| 4 |
UpperCAmelCase_ : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_5818,
}
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ,a__ : float ) -> float:
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
__A : Optional[int] = (
f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
f"""Valid values are: {", ".join(a__ )}"""
)
raise ValueError(a__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = tempfile.mkdtemp()
_lowerCAmelCase = BlipImageProcessor()
_lowerCAmelCase = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
_lowerCAmelCase = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
_lowerCAmelCase = InstructBlipProcessor(_lowercase , _lowercase , _lowercase )
processor.save_pretrained(self.tmpdirname )
def _lowercase ( self , **_lowercase ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).tokenizer
def _lowercase ( self , **_lowercase ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).image_processor
def _lowercase ( self , **_lowercase ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).qformer_tokenizer
def _lowercase ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_lowerCAmelCase = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 )
_lowerCAmelCase = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
self.assertIsInstance(processor.qformer_tokenizer , _lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_qformer_tokenizer()
_lowerCAmelCase = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase )
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = image_processor(_lowercase , return_tensors="""np""" )
_lowerCAmelCase = processor(images=_lowercase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_qformer_tokenizer()
_lowerCAmelCase = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase )
_lowerCAmelCase = """lower newer"""
_lowerCAmelCase = processor(text=_lowercase )
_lowerCAmelCase = tokenizer(_lowercase , return_token_type_ids=_lowercase )
_lowerCAmelCase = qformer_tokenizer(_lowercase , return_token_type_ids=_lowercase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_qformer_tokenizer()
_lowerCAmelCase = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase )
_lowerCAmelCase = """lower newer"""
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(_lowercase ):
processor()
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_qformer_tokenizer()
_lowerCAmelCase = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase )
_lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase = processor.batch_decode(_lowercase )
_lowerCAmelCase = tokenizer.batch_decode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_qformer_tokenizer()
_lowerCAmelCase = InstructBlipProcessor(
tokenizer=_lowercase , image_processor=_lowercase , qformer_tokenizer=_lowercase )
_lowerCAmelCase = """lower newer"""
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
| 5 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ : Optional[Any] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 | 0 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = ""
lowerCamelCase_ = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
lowerCamelCase_ = None # compression type in fsspec. ex: "gzip"
lowerCamelCase_ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self :Optional[int] , __A :str = "" , __A :Optional[str] = None , __A :Optional[dict] = None , **__A :List[str] ) -> Any:
"""simple docstring"""
super().__init__(self , **__A )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
SCREAMING_SNAKE_CASE__ = fsspec.open(
__A , mode="""rb""" , protocol=__A , compression=self.compression , client_kwargs={
"""requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
"""trust_env""": True, # Enable reading proxy env variables.
**(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
SCREAMING_SNAKE_CASE__ = os.path.basename(self.file.path.split("""::""" )[0] )
SCREAMING_SNAKE_CASE__ = (
self.compressed_name[: self.compressed_name.rindex(""".""" )]
if """.""" in self.compressed_name
else self.compressed_name
)
SCREAMING_SNAKE_CASE__ = None
@classmethod
def _snake_case ( cls :Any , __A :Tuple ) -> List[str]:
"""simple docstring"""
return super()._strip_protocol(__A ).lstrip("""/""" )
def _snake_case ( self :Union[str, Any] ) -> Tuple:
"""simple docstring"""
if self.dir_cache is None:
SCREAMING_SNAKE_CASE__ = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
SCREAMING_SNAKE_CASE__ = {f["""name"""]: f}
def _snake_case ( self :Optional[int] , __A :str ) -> str:
"""simple docstring"""
return self.file.open().read()
def _snake_case ( self :List[str] , __A :str , __A :str = "rb" , __A :int=None , __A :List[str]=True , __A :Optional[Any]=None , **__A :Union[str, Any] , ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self._strip_protocol(__A )
if mode != "rb":
raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "bz2"
lowerCamelCase_ = "bz2"
lowerCamelCase_ = ".bz2"
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "gzip"
lowerCamelCase_ = "gzip"
lowerCamelCase_ = ".gz"
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "lz4"
lowerCamelCase_ = "lz4"
lowerCamelCase_ = ".lz4"
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "xz"
lowerCamelCase_ = "xz"
lowerCamelCase_ = ".xz"
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "zstd"
lowerCamelCase_ = "zstd"
lowerCamelCase_ = ".zst"
def __init__( self :List[Any] , __A :str , __A :str = "rb" , __A :Optional[str] = None , __A :Optional[dict] = None , __A :int = DEFAULT_BLOCK_SIZE , **__A :Optional[int] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
fo=__A , mode=__A , target_protocol=__A , target_options=__A , block_size=__A , **__A , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
SCREAMING_SNAKE_CASE__ = self.file.__enter__
class UpperCamelCase_ :
def __init__( self :int , __A :Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = file_
def __enter__( self :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
self._file.__enter__()
return self
def __exit__( self :Optional[Any] , *__A :List[Any] , **__A :int ) -> Tuple:
"""simple docstring"""
self._file.__exit__(*__A , **__A )
def __iter__( self :Any ) -> Optional[int]:
"""simple docstring"""
return iter(self._file )
def _snake_case ( self :Dict ) -> Dict:
"""simple docstring"""
return next(self._file )
def __getattr__( self :Union[str, Any] , __A :List[str] ) -> Optional[Any]:
"""simple docstring"""
return getattr(self._file , __A )
def fixed_enter(*__A :List[Any] , **__A :Optional[int] ):
return WrappedFile(_enter(*__A , **__A ) )
SCREAMING_SNAKE_CASE__ = fixed_enter | 6 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
UpperCAmelCase_ : Optional[Any] = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def __SCREAMING_SNAKE_CASE ( a__ : str=True ) -> List[Any]:
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_lowercase ) )
class lowerCamelCase_ ( _lowercase ):
_lowercase : Optional[int] = None
_lowercase : str = None
def lowerCAmelCase_ ( self : Dict , __A : Optional[int] , __A : Optional[Any] ):
with TemporaryDirectory() as tmp_dir:
__A : List[Any] = dataset_module_factory(__A , cache_dir=__A )
__A : Tuple = import_main_class(dataset_module.module_path , dataset=__A )
__A : DatasetBuilder = builder_cls(
cache_dir=__A , config_name=__A , hash=dataset_module.hash , )
__A : List[Any] = """/""".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=__A ).replace(os.sep , """/""" ),
config.DATASET_INFO_FILENAME,
] )
__A : Union[str, Any] = cached_path(__A , cache_dir=__A )
self.assertTrue(os.path.exists(__A ) )
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( a__ : Dict ) -> Optional[Any]:
__A : Optional[Any] = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
__A : Union[str, Any] = dataset_module_factory("""wikipedia""" ,cache_dir=a__ )
__A : List[Any] = import_main_class(dataset_module.module_path )
__A : DatasetBuilder = builder_cls(
cache_dir=a__ ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
__A : Any = None
builder_instance.download_and_prepare()
__A : Union[str, Any] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( a__ : List[str] ) -> List[str]:
__A : Tuple = dataset_module_factory("""wikipedia""" ,cache_dir=a__ )
__A : str = import_main_class(dataset_module.module_path ,dataset=a__ )
__A : DatasetBuilder = builder_cls(
cache_dir=a__ ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
__A : Optional[int] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(a__ ,a__ )
assert "train" in ds
assert isinstance(ds["""train"""] ,a__ )
assert next(iter(ds["""train"""] ) )
| 17 | 0 |
"""simple docstring"""
import math
def _snake_case ( _snake_case : float , _snake_case : float ) -> float:
'''simple docstring'''
if initial_intensity < 0:
raise ValueError('The value of intensity cannot be negative' )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_60:
raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(_snake_case ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
| 7 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : Optional[int] , __A : Union[str, Any] , __A : int=7 , __A : int=3 , __A : int=30 , __A : Dict=400 , __A : str=True , __A : str=None , __A : str=True , __A : Optional[int]=[0.5, 0.5, 0.5] , __A : List[str]=[0.5, 0.5, 0.5] , __A : Optional[Any]=True , __A : int=1 / 255 , __A : List[Any]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
__A : Union[str, Any] = parent
__A : Union[str, Any] = batch_size
__A : Union[str, Any] = num_channels
__A : Optional[Any] = min_resolution
__A : Union[str, Any] = max_resolution
__A : Any = do_resize
__A : Union[str, Any] = size
__A : Optional[int] = do_normalize
__A : Dict = image_mean
__A : Optional[int] = image_std
__A : Tuple = do_rescale
__A : Optional[Any] = rescale_factor
__A : Tuple = do_pad
def lowerCAmelCase_ ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase_ ( self : Optional[Any] , __A : Optional[int] , __A : Dict=False ):
if not batched:
__A : Union[str, Any] = image_inputs[0]
if isinstance(__A , Image.Image ):
__A , __A : Union[str, Any] = image.size
else:
__A , __A : Optional[int] = image.shape[1], image.shape[2]
if w < h:
__A : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
__A : Dict = self.size["""shortest_edge"""]
elif w > h:
__A : Optional[Any] = self.size["""shortest_edge"""]
__A : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
__A : Union[str, Any] = self.size["""shortest_edge"""]
__A : str = self.size["""shortest_edge"""]
else:
__A : Any = []
for image in image_inputs:
__A , __A : List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__A : Tuple = max(__A , key=lambda __A : item[0] )[0]
__A : Union[str, Any] = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase_ ( _lowercase , unittest.TestCase ):
_lowercase : Tuple = DetaImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : Optional[Any] ):
__A : Tuple = DetaImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : List[str] ):
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , """image_mean""" ) )
self.assertTrue(hasattr(__A , """image_std""" ) )
self.assertTrue(hasattr(__A , """do_normalize""" ) )
self.assertTrue(hasattr(__A , """do_resize""" ) )
self.assertTrue(hasattr(__A , """do_rescale""" ) )
self.assertTrue(hasattr(__A , """do_pad""" ) )
self.assertTrue(hasattr(__A , """size""" ) )
def lowerCAmelCase_ ( self : Any ):
__A : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , __A )
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A , __A : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
__A : List[str] = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : int = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Dict ):
# Initialize image_processing
__A : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
__A : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Tuple = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Any = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase_ ( self : Tuple ):
# prepare image and target
__A : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__A : Any = json.loads(f.read() )
__A : int = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__A : List[str] = DetaImageProcessor()
__A : List[str] = image_processing(images=__A , annotations=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Tuple = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : List[str] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify orig_size
__A : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
# prepare image, target and masks_path
__A : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__A : Tuple = json.loads(f.read() )
__A : Optional[int] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__A : Any = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__A : Any = DetaImageProcessor(format="""coco_panoptic""" )
__A : int = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Optional[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : Any = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : Optional[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify masks
__A : List[str] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __A )
# verify orig_size
__A : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
| 17 | 0 |
'''simple docstring'''
import math
def _lowerCAmelCase ( __snake_case : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _lowerCAmelCase ( __snake_case : float = 0.1 ) -> int:
__A : Any = 3
__A : Optional[Any] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(__snake_case )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
__A : List[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=a__ ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=a__ ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=a__ )
return parser.parse_args()
def __SCREAMING_SNAKE_CASE ( ) -> str:
__A : Union[str, Any] = parse_args()
# Import training_script as a module.
__A : List[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__A : str = script_fpath.stem
__A : int = importlib.import_module(a__ )
# Patch sys.argv
__A : List[str] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 17 | 0 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 16_000 ) -> Union[str, Any]:
A__ = int(round(sample_rate * max_length ) )
if len(__UpperCamelCase ) <= sample_length:
return wav
A__ = randint(0 , len(__UpperCamelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
A__ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"help": "Name of a dataset from the datasets package"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "A file containing the training audio paths and labels."} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "A file containing the validation audio paths and labels."} )
A__ : str = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
A__ : str = field(
default="validation" , metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
A__ : str = field(
default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , )
A__ : str = field(
default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} )
A__ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
A__ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
A__ : float = field(
default=20 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , )
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
A__ : str = field(
default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
A__ : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Name or path of preprocessor config."} )
A__ : bool = field(
default=UpperCAmelCase_ , metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
A__ : bool = field(
default=UpperCAmelCase_ , metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
A__ : bool = field(
default=UpperCAmelCase_ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
A__ : Optional[bool] = field(
default=UpperCAmelCase_ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
A__ : bool = field(
default=UpperCAmelCase_ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def _a ( self : Dict ):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'The argument `--freeze_feature_extractor` is deprecated and '
'will be removed in a future version. Use `--freeze_feature_encoder`'
'instead. Setting `freeze_feature_encoder==True`.' , _snake_case , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'The argument `--freeze_feature_extractor` is deprecated and '
'should not be used in combination with `--freeze_feature_encoder`.'
'Only make use of `--freeze_feature_encoder`.' )
def A ( ) -> List[str]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__ , A__ , A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__ , A__ , A__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_audio_classification' , __UpperCamelCase , __UpperCamelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A__ = training_args.get_process_log_level()
logger.setLevel(__UpperCamelCase )
transformers.utils.logging.set_verbosity(__UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
A__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
A__ = DatasetDict()
A__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
A__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
'Make sure to set `--audio_column_name` to the correct audio column - one of '
f'''{", ".join(raw_datasets["train"].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
'Make sure to set `--label_column_name` to the correct text column - one of '
f'''{", ".join(raw_datasets["train"].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
A__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
A__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
A__ = feature_extractor.model_input_names[0]
def train_transforms(__UpperCamelCase ):
A__ = []
for audio in batch[data_args.audio_column_name]:
A__ = random_subsample(
audio['array'] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(__UpperCamelCase )
A__ = feature_extractor(__UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
A__ = {model_input_name: inputs.get(__UpperCamelCase )}
A__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(__UpperCamelCase ):
A__ = [audio['array'] for audio in batch[data_args.audio_column_name]]
A__ = feature_extractor(__UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
A__ = {model_input_name: inputs.get(__UpperCamelCase )}
A__ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
A__ = raw_datasets['train'].features[data_args.label_column_name].names
A__ , A__ = {}, {}
for i, label in enumerate(__UpperCamelCase ):
A__ = str(__UpperCamelCase )
A__ = label
# Load the accuracy metric from the datasets package
A__ = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(__UpperCamelCase ):
A__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=__UpperCamelCase , references=eval_pred.label_ids )
A__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(__UpperCamelCase ) , labelaid=__UpperCamelCase , idalabel=__UpperCamelCase , finetuning_task='audio-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
A__ = (
raw_datasets['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(__UpperCamelCase , output_all_columns=__UpperCamelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
A__ = (
raw_datasets['eval'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(__UpperCamelCase , output_all_columns=__UpperCamelCase )
# Initialize our trainer
A__ = Trainer(
model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=raw_datasets['train'] if training_args.do_train else None , eval_dataset=raw_datasets['eval'] if training_args.do_eval else None , compute_metrics=__UpperCamelCase , tokenizer=__UpperCamelCase , )
# Training
if training_args.do_train:
A__ = None
if training_args.resume_from_checkpoint is not None:
A__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A__ = last_checkpoint
A__ = trainer.train(resume_from_checkpoint=__UpperCamelCase )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
A__ = trainer.evaluate()
trainer.log_metrics('eval' , __UpperCamelCase )
trainer.save_metrics('eval' , __UpperCamelCase )
# Write model card and (optionally) push to hub
A__ = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'audio-classification',
'dataset': data_args.dataset_name,
'tags': ['audio-classification'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCamelCase )
else:
trainer.create_model_card(**__UpperCamelCase )
if __name__ == "__main__":
main()
| 9 |
from collections.abc import Sequence
def __SCREAMING_SNAKE_CASE ( a__ : Sequence[float] ,a__ : float ) -> float:
return sum(c * (x**i) for i, c in enumerate(a__ ) )
def __SCREAMING_SNAKE_CASE ( a__ : Sequence[float] ,a__ : float ) -> float:
__A : Any = 0.0
for coeff in reversed(a__ ):
__A : List[str] = result * x + coeff
return result
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCAmelCase_ : str = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 17 | 0 |
def _snake_case ( __snake_case , __snake_case ):
while a != 0:
_UpperCamelCase , _UpperCamelCase = b % a, a
return b
def _snake_case ( __snake_case , __snake_case ):
if gcd(__snake_case , __snake_case ) != 1:
_UpperCamelCase = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(__snake_case )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 1, 0, a
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0, 1, m
while va != 0:
_UpperCamelCase = ua // va
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
| 10 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCamelCase_ ( _lowercase ):
_lowercase : Union[str, Any] = '''EncodecFeatureExtractor'''
_lowercase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self : List[Any] , __A : Any , __A : Tuple ):
super().__init__(__A , __A )
__A : Dict = self.feature_extractor
__A : List[str] = False
def lowerCAmelCase_ ( self : Union[str, Any] , __A : str=None , __A : Tuple=None , __A : Dict=True ):
return self.tokenizer.get_decoder_prompt_ids(task=__A , language=__A , no_timestamps=__A )
def __call__( self : Optional[Any] , *__A : Tuple , **__A : Tuple ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__A , **__A )
__A : str = kwargs.pop("""audio""" , __A )
__A : Optional[Any] = kwargs.pop("""sampling_rate""" , __A )
__A : int = kwargs.pop("""text""" , __A )
if len(__A ) > 0:
__A : int = args[0]
__A : Dict = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if text is not None:
__A : Dict = self.tokenizer(__A , **__A )
if audio is not None:
__A : Optional[int] = self.feature_extractor(__A , *__A , sampling_rate=__A , **__A )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__A : List[Any] = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
__A : int = audio_inputs["""padding_mask"""]
return inputs
def lowerCAmelCase_ ( self : List[str] , *__A : int , **__A : Tuple ):
__A : Optional[int] = kwargs.pop("""audio""" , __A )
__A : List[str] = kwargs.pop("""padding_mask""" , __A )
if len(__A ) > 0:
__A : Dict = args[0]
__A : Optional[int] = args[1:]
if audio_values is not None:
return self._decode_audio(__A , padding_mask=__A )
else:
return self.tokenizer.batch_decode(*__A , **__A )
def lowerCAmelCase_ ( self : Optional[Any] , *__A : Dict , **__A : Any ):
return self.tokenizer.decode(*__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : Union[str, Any] , __A : Optional = None ):
__A : List[str] = to_numpy(__A )
__A , __A , __A : Tuple = audio_values.shape
if padding_mask is None:
return list(__A )
__A : Union[str, Any] = to_numpy(__A )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__A : List[str] = seq_len - padding_mask.shape[-1]
__A : Tuple = 1 - self.feature_extractor.padding_value
__A : Optional[int] = np.pad(__A , ((0, 0), (0, difference)) , """constant""" , constant_values=__A )
__A : int = audio_values.tolist()
for i in range(__A ):
__A : str = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__A : List[Any] = sliced_audio.reshape(__A , -1 )
return audio_values
| 17 | 0 |
'''simple docstring'''
def lowerCAmelCase (__A):
"""simple docstring"""
if any(not isinstance(__A , __A) or x < 0 for x in sequence):
raise TypeError('''Sequence must be list of non-negative integers''')
for _ in range(len(__A)):
for i, (rod_upper, rod_lower) in enumerate(zip(__A , sequence[1:])):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 11 |
def __SCREAMING_SNAKE_CASE ( a__ : int ) -> int:
if not isinstance(a__ ,a__ ):
raise TypeError("""Input value must be an 'int' type""" )
__A : Union[str, Any] = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowerCamelCase__ : Any = logging.get_logger(__name__)
lowerCamelCase__ : List[Any] = {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : List[str] = 't5'
__lowerCAmelCase : List[str] = ['past_key_values']
__lowerCAmelCase : List[str] = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self , SCREAMING_SNAKE_CASE_=3_21_28 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=20_48 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=1_28 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=1E-6 , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_="relu" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=1 , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
lowercase__ : Optional[Any] = vocab_size
lowercase__ : Union[str, Any] = d_model
lowercase__ : Union[str, Any] = d_kv
lowercase__ : List[Any] = d_ff
lowercase__ : Optional[Any] = num_layers
lowercase__ : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowercase__ : Any = num_heads
lowercase__ : Tuple = relative_attention_num_buckets
lowercase__ : Union[str, Any] = relative_attention_max_distance
lowercase__ : str = dropout_rate
lowercase__ : Dict = layer_norm_epsilon
lowercase__ : Optional[int] = initializer_factor
lowercase__ : str = feed_forward_proj
lowercase__ : Optional[int] = use_cache
lowercase__ : str = self.feed_forward_proj.split("""-""")
lowercase__ : Tuple = act_info[-1]
lowercase__ : Optional[int] = act_info[0] == """gated"""
if len(SCREAMING_SNAKE_CASE_) > 1 and act_info[0] != "gated" or len(SCREAMING_SNAKE_CASE_) > 2:
raise ValueError(
f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""")
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
lowercase__ : str = """gelu_new"""
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
class _snake_case ( UpperCAmelCase_ ):
@property
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : str = {
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
lowercase__ : List[str] = """past_encoder_sequence + sequence"""
lowercase__ : int = {0: """batch"""}
lowercase__ : Optional[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
lowercase__ : int = {0: """batch""", 1: """decoder_sequence"""}
lowercase__ : Optional[int] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ , direction="""inputs""")
return common_inputs
@property
def lowercase__ ( self):
'''simple docstring'''
return 13
| 12 |
UpperCAmelCase_ : dict[tuple[int, int, int], int] = {}
def __SCREAMING_SNAKE_CASE ( a__ : int ,a__ : int ,a__ : int ) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
__A : List[Any] = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
__A : Dict = _calculate(days - 1 ,a__ ,late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
__A : List[str] = _calculate(days - 1 ,absent + 1 ,0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
__A : int = _calculate(days - 1 ,a__ ,0 )
__A : Optional[int] = state_late + state_absent + state_ontime
__A : Tuple = prizestrings
return prizestrings
def __SCREAMING_SNAKE_CASE ( a__ : int = 30 ) -> int:
return _calculate(a__ ,absent=0 ,late=0 )
if __name__ == "__main__":
print(solution())
| 17 | 0 |
'''simple docstring'''
A__ : dict[tuple[int, int, int], int] = {}
def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
__lowerCamelCase : List[Any] = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
__lowerCamelCase : Tuple = _calculate(days - 1 , UpperCAmelCase_ , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
__lowerCamelCase : int = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
__lowerCamelCase : List[Any] = _calculate(days - 1 , UpperCAmelCase_ , 0 )
__lowerCamelCase : Optional[int] = state_late + state_absent + state_ontime
__lowerCamelCase : Union[str, Any] = prizestrings
return prizestrings
def UpperCAmelCase__ ( UpperCAmelCase_ : int = 30 ) -> int:
return _calculate(UpperCAmelCase_ , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
| 13 |
class lowerCamelCase_ :
def __init__( self : Dict , __A : int , __A : Tuple , __A : List[Any] ):
__A : Optional[int] = None
__A : Any = None
__A : int = graph
self._normalize_graph(__A , __A )
__A : str = len(__A )
__A : Optional[int] = None
def lowerCAmelCase_ ( self : int , __A : Any , __A : Optional[Any] ):
if sources is int:
__A : Dict = [sources]
if sinks is int:
__A : Optional[int] = [sinks]
if len(__A ) == 0 or len(__A ) == 0:
return
__A : str = sources[0]
__A : Dict = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(__A ) > 1 or len(__A ) > 1:
__A : Optional[Any] = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
__A : List[Any] = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
__A : str = max_input_flow
__A : Union[str, Any] = 0
__A : Any = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
__A : int = max_input_flow
__A : Optional[Any] = size - 1
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def lowerCAmelCase_ ( self : Optional[Any] , __A : Dict ):
__A : Dict = algorithm(self )
class lowerCamelCase_ :
def __init__( self : Union[str, Any] , __A : str ):
__A : Any = flow_network
__A : int = flow_network.verticesCount
__A : List[Any] = flow_network.sourceIndex
__A : Union[str, Any] = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
__A : Optional[int] = flow_network.graph
__A : str = False
def lowerCAmelCase_ ( self : List[Any] ):
if not self.executed:
self._algorithm()
__A : Any = True
def lowerCAmelCase_ ( self : List[str] ):
pass
class lowerCamelCase_ ( _lowercase ):
def __init__( self : Any , __A : List[str] ):
super().__init__(__A )
# use this to save your result
__A : str = -1
def lowerCAmelCase_ ( self : Any ):
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class lowerCamelCase_ ( _lowercase ):
def __init__( self : List[Any] , __A : Dict ):
super().__init__(__A )
__A : Tuple = [[0] * self.verticies_count for i in range(self.verticies_count )]
__A : Optional[Any] = [0] * self.verticies_count
__A : Union[str, Any] = [0] * self.verticies_count
def lowerCAmelCase_ ( self : int ):
__A : Optional[int] = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
__A : List[str] = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
__A : Dict = 0
while i < len(__A ):
__A : List[Any] = vertices_list[i]
__A : Optional[Any] = self.heights[vertex_index]
self.process_vertex(__A )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(__A ) )
__A : Any = 0
else:
i += 1
__A : Optional[int] = sum(self.preflow[self.source_index] )
def lowerCAmelCase_ ( self : Optional[Any] , __A : str ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__A , __A )
self.relabel(__A )
def lowerCAmelCase_ ( self : Dict , __A : List[str] , __A : Optional[Any] ):
__A : Union[str, Any] = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def lowerCAmelCase_ ( self : Optional[Any] , __A : Tuple ):
__A : Tuple = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
__A : Dict = self.heights[to_index]
if min_height is not None:
__A : Optional[int] = min_height + 1
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = [0]
UpperCAmelCase_ : Dict = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
UpperCAmelCase_ : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
UpperCAmelCase_ : str = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
UpperCAmelCase_ : int = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 17 | 0 |
from __future__ import annotations
import math
def __UpperCAmelCase ( __a : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 ,int(math.sqrt(__a ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
a__ = [num for num in range(3, 100001, 2) if not is_prime(num)]
def __UpperCAmelCase ( __a : int ) -> list[int]:
"""simple docstring"""
if not isinstance(__a ,__a ):
raise ValueError('''n must be an integer''' )
if n <= 0:
raise ValueError('''n must be >= 0''' )
_a : Dict = []
for num in range(len(__a ) ):
_a : Optional[Any] = 0
while 2 * i * i <= odd_composites[num]:
_a : Optional[int] = odd_composites[num] - 2 * i * i
if is_prime(__a ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(__a ) == n:
return list_nums
return []
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 14 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ) -> str | Literal[False]:
__A : Tuple = list(a__ )
__A : Optional[int] = list(a__ )
__A : int = 0
for i in range(len(a__ ) ):
if lista[i] != lista[i]:
count += 1
__A : int = """_"""
if count > 1:
return False
else:
return "".join(a__ )
def __SCREAMING_SNAKE_CASE ( a__ : list[str] ) -> list[str]:
__A : Optional[Any] = []
while True:
__A : Tuple = ["""$"""] * len(a__ )
__A : Union[str, Any] = []
for i in range(len(a__ ) ):
for j in range(i + 1 ,len(a__ ) ):
__A : int = compare_string(binary[i] ,binary[j] )
if k is False:
__A : List[str] = """*"""
__A : Any = """*"""
temp.append("""X""" )
for i in range(len(a__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(a__ ) == 0:
return pi
__A : Optional[Any] = list(set(a__ ) )
def __SCREAMING_SNAKE_CASE ( a__ : int ,a__ : Sequence[float] ) -> list[str]:
__A : List[str] = []
for minterm in minterms:
__A : List[Any] = """"""
for _ in range(a__ ):
__A : Union[str, Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(a__ )
return temp
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ,a__ : int ) -> bool:
__A : Optional[Any] = list(a__ )
__A : Tuple = list(a__ )
__A : Any = 0
for i in range(len(a__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def __SCREAMING_SNAKE_CASE ( a__ : list[list[int]] ,a__ : list[str] ) -> list[str]:
__A : Optional[int] = []
__A : Tuple = [0] * len(a__ )
for i in range(len(chart[0] ) ):
__A : str = 0
__A : Any = -1
for j in range(len(a__ ) ):
if chart[j][i] == 1:
count += 1
__A : Optional[Any] = j
if count == 1:
__A : int = 1
for i in range(len(a__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(a__ ) ):
__A : List[str] = 0
temp.append(prime_implicants[i] )
while True:
__A : Optional[Any] = 0
__A : Any = -1
__A : int = 0
for i in range(len(a__ ) ):
__A : List[Any] = chart[i].count(1 )
if count_n > max_n:
__A : Dict = count_n
__A : Tuple = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(a__ ) ):
__A : Union[str, Any] = 0
def __SCREAMING_SNAKE_CASE ( a__ : list[str] ,a__ : list[str] ) -> list[list[int]]:
__A : Any = [[0 for x in range(len(a__ ) )] for x in range(len(a__ ) )]
for i in range(len(a__ ) ):
__A : List[Any] = prime_implicants[i].count("""_""" )
for j in range(len(a__ ) ):
if is_for_table(prime_implicants[i] ,binary[j] ,a__ ):
__A : Union[str, Any] = 1
return chart
def __SCREAMING_SNAKE_CASE ( ) -> None:
__A : Any = int(input("""Enter the no. of variables\n""" ) )
__A : List[str] = [
float(a__ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
__A : Dict = decimal_to_binary(a__ ,a__ )
__A : Union[str, Any] = check(a__ )
print("""Prime Implicants are:""" )
print(a__ )
__A : Optional[Any] = prime_implicant_chart(a__ ,a__ )
__A : Any = selection(a__ ,a__ )
print("""Essential Prime Implicants are:""" )
print(a__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 17 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def UpperCamelCase ( __magic_name__ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = 384
if "tiny" in model_name:
lowercase__ = [3, 3, 9, 3]
lowercase__ = [96, 192, 384, 768]
if "small" in model_name:
lowercase__ = [3, 3, 27, 3]
lowercase__ = [96, 192, 384, 768]
if "base" in model_name:
lowercase__ = [3, 3, 27, 3]
lowercase__ = [128, 256, 512, 1024]
lowercase__ = 512
if "large" in model_name:
lowercase__ = [3, 3, 27, 3]
lowercase__ = [192, 384, 768, 1536]
lowercase__ = 768
if "xlarge" in model_name:
lowercase__ = [3, 3, 27, 3]
lowercase__ = [256, 512, 1024, 2048]
lowercase__ = 1024
# set label information
lowercase__ = 150
lowercase__ = """huggingface/label-files"""
lowercase__ = """ade20k-id2label.json"""
lowercase__ = json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="""dataset""" ) , """r""" ) )
lowercase__ = {int(__magic_name__ ): v for k, v in idalabel.items()}
lowercase__ = {v: k for k, v in idalabel.items()}
lowercase__ = ConvNextConfig(
depths=__magic_name__ , hidden_sizes=__magic_name__ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
lowercase__ = UperNetConfig(
backbone_config=__magic_name__ , auxiliary_in_channels=__magic_name__ , num_labels=__magic_name__ , idalabel=__magic_name__ , labelaid=__magic_name__ , )
return config
def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> str:
"""simple docstring"""
lowercase__ = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def UpperCamelCase ( __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = dct.pop(__magic_name__ )
lowercase__ = val
def UpperCamelCase ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ) -> str:
"""simple docstring"""
lowercase__ = {
"""upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
"""upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
"""upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
"""upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
"""upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
}
lowercase__ = model_name_to_url[model_name]
lowercase__ = torch.hub.load_state_dict_from_url(__magic_name__ , map_location="""cpu""" )["""state_dict"""]
lowercase__ = get_upernet_config(__magic_name__ )
lowercase__ = UperNetForSemanticSegmentation(__magic_name__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
lowercase__ = state_dict.pop(__magic_name__ )
if "bn" in key:
lowercase__ = key.replace("""bn""" , """batch_norm""" )
lowercase__ = val
# rename keys
lowercase__ = create_rename_keys(__magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
model.load_state_dict(__magic_name__ )
# verify on image
lowercase__ = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
lowercase__ = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ).convert("""RGB""" )
lowercase__ = SegformerImageProcessor()
lowercase__ = processor(__magic_name__ , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
lowercase__ = model(__magic_name__ )
if model_name == "upernet-convnext-tiny":
lowercase__ = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] )
elif model_name == "upernet-convnext-small":
lowercase__ = torch.tensor(
[[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] )
elif model_name == "upernet-convnext-base":
lowercase__ = torch.tensor(
[[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] )
elif model_name == "upernet-convnext-large":
lowercase__ = torch.tensor(
[[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] )
elif model_name == "upernet-convnext-xlarge":
lowercase__ = torch.tensor(
[[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __magic_name__ , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(__magic_name__ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
A : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[F'upernet-convnext-{size}' for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
A : Union[str, Any] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 15 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( a__ : List[str] ,a__ : Dict ,a__ : Union[str, Any] ,a__ : Any ) -> Optional[int]: # noqa: E741
while r - l > 1:
__A : Any = (l + r) // 2
if v[m] >= key:
__A : Optional[int] = m
else:
__A : List[Any] = m # noqa: E741
return r
def __SCREAMING_SNAKE_CASE ( a__ : list[int] ) -> int:
if len(a__ ) == 0:
return 0
__A : str = [0] * len(a__ )
__A : List[str] = 1
__A : List[Any] = v[0]
for i in range(1 ,len(a__ ) ):
if v[i] < tail[0]:
__A : int = v[i]
elif v[i] > tail[length - 1]:
__A : Union[str, Any] = v[i]
length += 1
else:
__A : Any = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : List[str] = logging.get_logger(__name__)
__A : str = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "resnet"
lowerCamelCase__ = ["basic", "bottleneck"]
def __init__( self : List[Any] , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : int=[256, 512, 1024, 2048] , __lowerCamelCase : Dict=[3, 4, 6, 3] , __lowerCamelCase : List[str]="bottleneck" , __lowerCamelCase : Optional[int]="relu" , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : List[str]=None , **__lowerCamelCase : Union[str, Any] , ):
super().__init__(**__lowerCamelCase )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = embedding_size
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = layer_type
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = downsample_in_first_stage
SCREAMING_SNAKE_CASE = ["stem"] + [f"stage{idx}" for idx in range(1 , len(__lowerCamelCase ) + 1 )]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(
out_features=__lowerCamelCase , out_indices=__lowerCamelCase , stage_names=self.stage_names )
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = version.parse("1.11" )
@property
def _snake_case ( self : List[Any] ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _snake_case ( self : Union[str, Any] ):
return 1e-3 | 16 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 17 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Union[str, Any] = "swinv2"
__lowerCamelCase : int = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , _lowerCAmelCase=224 , _lowerCAmelCase=4 , _lowerCAmelCase=3 , _lowerCAmelCase=96 , _lowerCAmelCase=[2, 2, 6, 2] , _lowerCAmelCase=[3, 6, 12, 24] , _lowerCAmelCase=7 , _lowerCAmelCase=4.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=False , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=32 , **_lowerCAmelCase , ) -> Tuple:
super().__init__(**_lowerCAmelCase )
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = embed_dim
_lowerCAmelCase = depths
_lowerCAmelCase = len(_lowerCAmelCase )
_lowerCAmelCase = num_heads
_lowerCAmelCase = window_size
_lowerCAmelCase = mlp_ratio
_lowerCAmelCase = qkv_bias
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = drop_path_rate
_lowerCAmelCase = hidden_act
_lowerCAmelCase = use_absolute_embeddings
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = initializer_range
_lowerCAmelCase = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
_lowerCAmelCase = (0, 0, 0, 0)
| 18 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCamelCase_ ( _lowercase ):
_lowercase : Dict = '''unispeech'''
def __init__( self : str , __A : Tuple=32 , __A : List[str]=768 , __A : Dict=12 , __A : Union[str, Any]=12 , __A : Tuple=3072 , __A : Any="gelu" , __A : int=0.1 , __A : Optional[int]=0.1 , __A : List[Any]=0.1 , __A : Any=0.0 , __A : List[str]=0.0 , __A : int=0.1 , __A : List[Any]=0.1 , __A : List[str]=0.0_2 , __A : List[str]=1e-5 , __A : List[Any]="group" , __A : int="gelu" , __A : Any=(512, 512, 512, 512, 512, 512, 512) , __A : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , __A : Tuple=(10, 3, 3, 3, 3, 2, 2) , __A : Optional[int]=False , __A : Any=128 , __A : Union[str, Any]=16 , __A : Optional[Any]=False , __A : str=True , __A : Dict=0.0_5 , __A : Optional[Any]=10 , __A : Dict=2 , __A : int=0.0 , __A : List[str]=10 , __A : str=0 , __A : List[str]=320 , __A : List[Any]=2 , __A : Tuple=0.1 , __A : Optional[int]=100 , __A : Any=256 , __A : Dict=256 , __A : Tuple=0.1 , __A : List[str]="mean" , __A : int=False , __A : List[str]=False , __A : List[Any]=256 , __A : str=80 , __A : Tuple=0 , __A : Tuple=1 , __A : int=2 , __A : Dict=0.5 , **__A : List[Any] , ):
super().__init__(**__A , pad_token_id=__A , bos_token_id=__A , eos_token_id=__A )
__A : Dict = hidden_size
__A : Optional[Any] = feat_extract_norm
__A : List[Any] = feat_extract_activation
__A : str = list(__A )
__A : Optional[Any] = list(__A )
__A : Optional[int] = list(__A )
__A : List[Any] = conv_bias
__A : Optional[int] = num_conv_pos_embeddings
__A : List[Any] = num_conv_pos_embedding_groups
__A : int = len(self.conv_dim )
__A : Optional[Any] = num_hidden_layers
__A : List[str] = intermediate_size
__A : Union[str, Any] = hidden_act
__A : Optional[int] = num_attention_heads
__A : Tuple = hidden_dropout
__A : Optional[Any] = attention_dropout
__A : Union[str, Any] = activation_dropout
__A : Dict = feat_proj_dropout
__A : Optional[int] = final_dropout
__A : Dict = layerdrop
__A : Optional[int] = layer_norm_eps
__A : Optional[Any] = initializer_range
__A : Optional[int] = num_ctc_classes
__A : Dict = vocab_size
__A : List[str] = do_stable_layer_norm
__A : Tuple = use_weighted_layer_sum
__A : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__A : Tuple = apply_spec_augment
__A : Union[str, Any] = mask_time_prob
__A : Optional[Any] = mask_time_length
__A : List[Any] = mask_time_min_masks
__A : List[Any] = mask_feature_prob
__A : Any = mask_feature_length
__A : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__A : Any = num_codevectors_per_group
__A : Tuple = num_codevector_groups
__A : List[str] = contrastive_logits_temperature
__A : Optional[int] = feat_quantizer_dropout
__A : int = num_negatives
__A : List[str] = codevector_dim
__A : int = proj_codevector_dim
__A : Union[str, Any] = diversity_loss_weight
# ctc loss
__A : List[str] = ctc_loss_reduction
__A : Any = ctc_zero_infinity
# pretraining loss
__A : Union[str, Any] = replace_prob
@property
def lowerCAmelCase_ ( self : int ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 17 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , __a=0 , ) -> Any:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
_UpperCamelCase = projection_dim
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
_UpperCamelCase = DPRConfig(projection_dim=self.projection_dim , **config.to_dict())
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = TFDPRContextEncoder(config=__a)
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
_UpperCamelCase = model(__a , token_type_ids=__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = TFDPRQuestionEncoder(config=__a)
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
_UpperCamelCase = model(__a , token_type_ids=__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = TFDPRReader(config=__a)
_UpperCamelCase = model(__a , attention_mask=__a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,))
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowercase__ = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = TFDPRModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , hidden_size=37)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__a)
@slow
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRContextEncoder.from_pretrained(__a)
self.assertIsNotNone(__a)
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRContextEncoder.from_pretrained(__a)
self.assertIsNotNone(__a)
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRQuestionEncoder.from_pretrained(__a)
self.assertIsNotNone(__a)
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRReader.from_pretrained(__a)
self.assertIsNotNone(__a)
@require_tf
class _UpperCAmelCase( unittest.TestCase ):
@slow
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''')
_UpperCamelCase = tf.constant(
[[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]]) # [CLS] hello, is my dog cute? [SEP]
_UpperCamelCase = model(__a)[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
_UpperCamelCase = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
])
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4))
| 19 |
import fire
from utils import calculate_rouge, save_json
def __SCREAMING_SNAKE_CASE ( a__ : Any ,a__ : Tuple ,a__ : Any=None ,**a__ : Dict ) -> Optional[Any]:
__A : int = [x.strip() for x in open(a__ ).readlines()]
__A : List[str] = [x.strip() for x in open(a__ ).readlines()][: len(a__ )]
__A : List[Any] = calculate_rouge(a__ ,a__ ,**a__ )
if save_path is not None:
save_json(a__ ,a__ ,indent=a__ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 17 | 0 |
def _lowercase( __a : list ):
a__ =len(__a )
for i in range(1 , __a ):
a__ =collection[i]
a__ =0
a__ =i - 1
while low <= high:
a__ =(low + high) // 2
if val < collection[mid]:
a__ =mid - 1
else:
a__ =mid + 1
for j in range(__a , __a , -1 ):
a__ =collection[j - 1]
a__ =val
return collection
if __name__ == "__main__":
_lowerCAmelCase: Optional[int] = input('Enter numbers separated by a comma:\n').strip()
_lowerCAmelCase: str = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted))
| 20 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( a__ : Optional[Any] ,a__ : Union[str, Any] ,a__ : Optional[int] ) -> List[Any]:
# Initialise PyTorch model
__A : Dict = MobileBertConfig.from_json_file(a__ )
print(f"""Building PyTorch model from configuration: {config}""" )
__A : Tuple = MobileBertForPreTraining(a__ )
# Load weights from tf checkpoint
__A : Dict = load_tf_weights_in_mobilebert(a__ ,a__ ,a__ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() ,a__ )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 17 | 0 |
from __future__ import annotations
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None ):
if start is None:
__magic_name__ : Tuple =0
if end is None:
__magic_name__ : Tuple =len(lowerCamelCase ) - 1
if start >= end:
return
__magic_name__ : str =(start + end) // 2
slowsort(lowerCamelCase , lowerCamelCase , lowerCamelCase )
slowsort(lowerCamelCase , mid + 1 , lowerCamelCase )
if sequence[end] < sequence[mid]:
__magic_name__ , __magic_name__ : Tuple =sequence[mid], sequence[end]
slowsort(lowerCamelCase , lowerCamelCase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 21 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : int = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 | 0 |
'''simple docstring'''
from math import pi, sqrt
def snake_case_ (UpperCamelCase : float ):
'''simple docstring'''
if num <= 0:
raise ValueError('''math domain error''' )
if num > 171.5:
raise OverflowError('''math range error''' )
elif num - int(UpperCamelCase ) not in (0, 0.5):
raise NotImplementedError('''num must be an integer or a half-integer''' )
elif num == 0.5:
return sqrt(UpperCamelCase )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def snake_case_ ():
'''simple docstring'''
assert gamma(0.5 ) == sqrt(UpperCamelCase )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
_snake_case : Optional[Any] = 1.0
while num:
_snake_case : Dict = float(input('Gamma of: '))
print(F'''gamma({num}) = {gamma(num)}''')
print('\nEnter 0 to exit...')
| 22 |
import math
class lowerCamelCase_ :
def __init__( self : Union[str, Any] , __A : List[str]=0 ): # a graph with Node 0,1,...,N-1
__A : List[str] = n
__A : List[str] = [
[math.inf for j in range(0 , __A )] for i in range(0 , __A )
] # adjacency matrix for weight
__A : str = [
[math.inf for j in range(0 , __A )] for i in range(0 , __A )
] # dp[i][j] stores minimum distance from i to j
def lowerCAmelCase_ ( self : str , __A : Union[str, Any] , __A : Any , __A : Optional[int] ):
__A : List[Any] = w
def lowerCAmelCase_ ( self : Union[str, Any] ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
__A : List[Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def lowerCAmelCase_ ( self : int , __A : List[str] , __A : List[str] ):
return self.dp[u][v]
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 17 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
snake_case__ : Optional[Any] = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Any = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
snake_case__ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 23 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : complex ,a__ : str = "x" ,a__ : float = 10**-10 ,a__ : int = 1 ,) -> complex:
__A : Tuple = symbols(a__ )
__A : List[str] = lambdify(a__ ,a__ )
__A : Any = lambdify(a__ ,diff(a__ ,a__ ) )
__A : Dict = starting_point
while True:
if diff_function(a__ ) != 0:
__A : Optional[int] = prev_guess - multiplicity * func(a__ ) / diff_function(
a__ )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
__A : List[Any] = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 17 | 0 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : int )-> list[list[int]]:
'''simple docstring'''
__snake_case = []
create_all_state(1 , _lowerCamelCase , _lowerCamelCase , [] , _lowerCamelCase )
return result
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[int] , _lowerCamelCase : list[list[int]] , )-> None:
'''simple docstring'''
if level == 0:
total_list.append(current_list[:] )
return
for i in range(_lowerCamelCase , total_number - level + 2 ):
current_list.append(_lowerCamelCase )
create_all_state(i + 1 , _lowerCamelCase , level - 1 , _lowerCamelCase , _lowerCamelCase )
current_list.pop()
def _UpperCamelCase (_lowerCamelCase : list[list[int]] )-> None:
'''simple docstring'''
for i in total_list:
print(*_lowerCamelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Dict = 4
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : str = generate_all_combinations(n, k)
print_all_state(total_list)
| 24 |
from math import sqrt
def __SCREAMING_SNAKE_CASE ( a__ : int = 1000000 ) -> int:
__A : int = 0
__A : int = 0
__A : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(a__ ,sum_shortest_sides // 2 )
- max(1 ,sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
| 17 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a_ = random.Random()
def lowerCamelCase__ ( _a , _a=1.0 , _a=None , _a=None):
if rng is None:
SCREAMING_SNAKE_CASE : List[str] = global_rng
SCREAMING_SNAKE_CASE : Optional[int] = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : str , a : Any , a : Union[str, Any]=7 , a : List[Any]=400 , a : str=2000 , a : Dict=2048 , a : List[Any]=128 , a : Tuple=1 , a : Union[str, Any]=512 , a : List[str]=30 , a : Tuple=4_4100 , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = parent
SCREAMING_SNAKE_CASE : List[str] = batch_size
SCREAMING_SNAKE_CASE : List[str] = min_seq_length
SCREAMING_SNAKE_CASE : List[str] = max_seq_length
SCREAMING_SNAKE_CASE : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE : Dict = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[int] = feature_size
SCREAMING_SNAKE_CASE : List[Any] = num_audio_channels
SCREAMING_SNAKE_CASE : Optional[Any] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = chunk_length
SCREAMING_SNAKE_CASE : List[str] = sampling_rate
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __UpperCamelCase ( self : Optional[int] , a : int=False , a : Tuple=False ) -> Union[str, Any]:
"""simple docstring"""
def _flatten(a : Any ):
return list(itertools.chain(*a ) )
if equal_length:
SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE : Optional[int] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE : Any = [np.asarray(a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =TvltFeatureExtractor
def __UpperCamelCase ( self : int ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = TvltFeatureExtractionTester(self )
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(a , "spectrogram_length" ) )
self.assertTrue(hasattr(a , "feature_size" ) )
self.assertTrue(hasattr(a , "num_audio_channels" ) )
self.assertTrue(hasattr(a , "hop_length" ) )
self.assertTrue(hasattr(a , "chunk_length" ) )
self.assertTrue(hasattr(a , "sampling_rate" ) )
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : List[Any] = feat_extract_first.save_pretrained(a )[0]
check_json_file_has_correct_format(a )
SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class.from_pretrained(a )
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : int = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : str = dict_first.pop("mel_filters" )
SCREAMING_SNAKE_CASE : str = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(a , a ) )
self.assertEqual(a , a )
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(a , "feat_extract.json" )
feat_extract_first.to_json_file(a )
SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class.from_json_file(a )
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : Tuple = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : str = dict_first.pop("mel_filters" )
SCREAMING_SNAKE_CASE : Optional[Any] = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(a , a ) )
self.assertEqual(a , a )
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : str = [np.asarray(a ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE : str = feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
SCREAMING_SNAKE_CASE : Tuple = feature_extractor(a , return_tensors="np" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor(
a , return_tensors="np" , sampling_rate=4_4100 , mask_audio=a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE : Optional[Any] = np.asarray(a )
SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(a , return_tensors="np" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __UpperCamelCase ( self : List[Any] , a : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE : Dict = ds.sort("id" ).select(range(a ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE : Tuple = TvltFeatureExtractor()
SCREAMING_SNAKE_CASE : Any = feature_extractor(a , return_tensors="pt" ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , a , atol=1e-4 ) ) | 25 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase_ ( _lowercase ):
_lowercase : List[str] = ['''pixel_values''']
def __init__( self : Dict , __A : bool = True , __A : Optional[Dict[str, int]] = None , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : bool = True , __A : Dict[str, int] = None , __A : bool = True , __A : Union[int, float] = 1 / 255 , __A : bool = True , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , **__A : int , ):
super().__init__(**__A )
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 256}
__A : Dict = get_size_dict(__A , default_to_square=__A )
__A : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : str = do_resize
__A : Dict = size
__A : Any = resample
__A : Optional[Any] = do_center_crop
__A : List[str] = crop_size
__A : Optional[int] = do_rescale
__A : int = rescale_factor
__A : Union[str, Any] = do_normalize
__A : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__A : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase_ ( self : Optional[Any] , __A : np.ndarray , __A : Dict[str, int] , __A : PILImageResampling = PILImageResampling.BICUBIC , __A : Optional[Union[str, ChannelDimension]] = None , **__A : List[Any] , ):
__A : str = get_size_dict(__A , default_to_square=__A )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__A : Dict = get_resize_output_image_size(__A , size=size["""shortest_edge"""] , default_to_square=__A )
return resize(__A , size=__A , resample=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : np.ndarray , __A : Dict[str, int] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : str , ):
__A : str = get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__A , size=(size["""height"""], size["""width"""]) , data_format=__A , **__A )
def lowerCAmelCase_ ( self : List[str] , __A : np.ndarray , __A : float , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Optional[int] ):
return rescale(__A , scale=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Any , __A : np.ndarray , __A : Union[float, List[float]] , __A : Union[float, List[float]] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Tuple , ):
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : int , __A : ImageInput , __A : Optional[bool] = None , __A : Dict[str, int] = None , __A : PILImageResampling = None , __A : bool = None , __A : Dict[str, int] = None , __A : Optional[bool] = None , __A : Optional[float] = None , __A : Optional[bool] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[str, TensorType]] = None , __A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__A : Optional[int] , ):
__A : List[str] = do_resize if do_resize is not None else self.do_resize
__A : Any = size if size is not None else self.size
__A : Union[str, Any] = get_size_dict(__A , default_to_square=__A )
__A : Tuple = resample if resample is not None else self.resample
__A : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
__A : List[Any] = crop_size if crop_size is not None else self.crop_size
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : Tuple = do_rescale if do_rescale is not None else self.do_rescale
__A : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__A : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
__A : Optional[int] = image_mean if image_mean is not None else self.image_mean
__A : List[str] = image_std if image_std is not None else self.image_std
__A : Union[str, Any] = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__A : Union[str, Any] = [to_numpy_array(__A ) for image in images]
if do_resize:
__A : int = [self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_center_crop:
__A : Optional[Any] = [self.center_crop(image=__A , size=__A ) for image in images]
if do_rescale:
__A : List[Any] = [self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
__A : Any = [self.normalize(image=__A , mean=__A , std=__A ) for image in images]
__A : int = [to_channel_dimension_format(__A , __A ) for image in images]
__A : Tuple = {"""pixel_values""": images}
return BatchFeature(data=__A , tensor_type=__A )
def lowerCAmelCase_ ( self : int , __A : List[str] , __A : List[Tuple] = None ):
__A : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__A ) != len(__A ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(__A ):
__A : str = target_sizes.numpy()
__A : int = []
for idx in range(len(__A ) ):
__A : Any = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__A )
__A : Union[str, Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__A )
else:
__A : List[str] = logits.argmax(dim=1 )
__A : Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 17 | 0 |
'''simple docstring'''
from __future__ import annotations
def _a ( _lowerCamelCase ) -> bool:
"""simple docstring"""
__snake_case : Union[str, Any] = len(_lowerCamelCase )
# We need to create solution object to save path.
__snake_case : Optional[Any] = [[0 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )]
__snake_case : int = run_maze(_lowerCamelCase , 0 , 0 , _lowerCamelCase )
if solved:
print("""\n""".join(str(_lowerCamelCase ) for row in solutions ) )
else:
print("""No solution exists!""" )
return solved
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> bool:
"""simple docstring"""
__snake_case : str = len(_lowerCamelCase )
# Final check point.
if i == j == (size - 1):
__snake_case : Tuple = 1
return True
__snake_case : Union[str, Any] = (not i < 0) and (not j < 0) # Check lower bounds
__snake_case : int = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
__snake_case : Union[str, Any] = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
__snake_case : Union[str, Any] = 1
# check for directions
if (
run_maze(_lowerCamelCase , i + 1 , _lowerCamelCase , _lowerCamelCase )
or run_maze(_lowerCamelCase , _lowerCamelCase , j + 1 , _lowerCamelCase )
or run_maze(_lowerCamelCase , i - 1 , _lowerCamelCase , _lowerCamelCase )
or run_maze(_lowerCamelCase , _lowerCamelCase , j - 1 , _lowerCamelCase )
):
return True
__snake_case : Dict = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
class lowerCamelCase_ :
def __init__( self : Dict , __A : Tuple , __A : Optional[int] , __A : int ):
__A : List[str] = name
__A : Optional[int] = value
__A : Optional[Any] = weight
def __repr__( self : Any ):
return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def lowerCAmelCase_ ( self : Union[str, Any] ):
return self.value
def lowerCAmelCase_ ( self : str ):
return self.name
def lowerCAmelCase_ ( self : str ):
return self.weight
def lowerCAmelCase_ ( self : Dict ):
return self.value / self.weight
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : Optional[int] ,a__ : Union[str, Any] ) -> int:
__A : Tuple = []
for i in range(len(a__ ) ):
menu.append(Things(name[i] ,value[i] ,weight[i] ) )
return menu
def __SCREAMING_SNAKE_CASE ( a__ : Tuple ,a__ : Any ,a__ : Optional[int] ) -> Tuple:
__A : Optional[int] = sorted(a__ ,key=a__ ,reverse=a__ )
__A : Optional[Any] = []
__A , __A : Tuple = 0.0, 0.0
for i in range(len(a__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__A : List[str] = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[Any] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 27 |
UpperCAmelCase_ : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_5818,
}
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ,a__ : float ) -> float:
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
__A : Optional[int] = (
f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
f"""Valid values are: {", ".join(a__ )}"""
)
raise ValueError(a__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
UpperCamelCase_ = 1_2_8_0_2_2
UpperCamelCase_ = 1_2_8_0_2_8
@require_sentencepiece
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : List[str] = MaMaaaTokenizer
A : Optional[int] = False
A : Tuple = False
A : Optional[Any] = True
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : str = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
SCREAMING_SNAKE_CASE : List[str] = dict(zip(A, range(len(A ) ) ) )
SCREAMING_SNAKE_CASE : List[str] = Path(self.tmpdirname )
save_json(A, save_dir / VOCAB_FILES_NAMES['vocab_file'] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(A, save_dir / VOCAB_FILES_NAMES['spm_file'] )
SCREAMING_SNAKE_CASE : Any = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = '</s>'
SCREAMING_SNAKE_CASE : List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ), A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ), A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Tuple = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0], '</s>' )
self.assertEqual(vocab_keys[1], '<unk>' )
self.assertEqual(vocab_keys[-1], '<s>' )
self.assertEqual(len(A ), tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('Skip this test while all models are still to be uploaded.' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(A, ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ), [2, 3, 4, 5, 6], )
SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(A, ['▁This', '▁is', '▁a', '▁t', 'est'] )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.convert_tokens_to_string(A )
self.assertEqual(A, 'This is a test' )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = {'input_ids': [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A, model_name='facebook/m2m100_418M', revision='c168bae485c864188cf9aa0e4108b0b6934dc91e', )
@require_torch
@require_sentencepiece
@require_tokenizers
class _a ( unittest.TestCase ):
'''simple docstring'''
A : Dict = '''facebook/m2m100_418M'''
A : Dict = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
A : Optional[int] = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
A : Dict = [EN_CODE, 593, 1_949, 115_781, 4, 71_586, 4_234, 60_633, 126_233, 432, 123_808, 15_592, 1_197, 117_132, 120_618, 5, 2]
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name, src_lang='en', tgt_lang='fr' )
SCREAMING_SNAKE_CASE : List[Any] = 1
return cls
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id('ar' ), 128_006 )
self.assertEqual(self.tokenizer.get_lang_id('en' ), 128_022 )
self.assertEqual(self.tokenizer.get_lang_id('ro' ), 128_076 )
self.assertEqual(self.tokenizer.get_lang_id('mr' ), 128_063 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer.get_vocab()
self.assertEqual(len(A ), self.tokenizer.vocab_size )
self.assertEqual(vocab['<unk>'], 3 )
self.assertIn(self.tokenizer.get_lang_token('en' ), A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = 'en'
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.assertIn(A, self.tokenizer.all_special_ids )
# fmt: off
SCREAMING_SNAKE_CASE : List[Any] = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
# fmt: on
SCREAMING_SNAKE_CASE : int = self.tokenizer.decode(A, skip_special_tokens=A )
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=A )
self.assertEqual(A, A )
self.assertNotIn(self.tokenizer.eos_token, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(A )
SCREAMING_SNAKE_CASE : Optional[int] = MaMaaaTokenizer.from_pretrained(A )
self.assertDictEqual(new_tok.lang_token_to_id, A )
@require_torch
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 'en'
SCREAMING_SNAKE_CASE : Tuple = 'fr'
SCREAMING_SNAKE_CASE : Any = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=A, return_tensors='pt' )
SCREAMING_SNAKE_CASE : List[str] = shift_tokens_right(
batch['labels'], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id )
for k in batch:
SCREAMING_SNAKE_CASE : Any = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = 'mr'
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )
SCREAMING_SNAKE_CASE : str = 'zh'
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )
@require_torch
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = 'mr'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('mr' )] )
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
SCREAMING_SNAKE_CASE : int = 'zh'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('zh' )] )
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.tokenizer._build_translation_inputs('A test', return_tensors='pt', src_lang='en', tgt_lang='ar' )
self.assertEqual(
nested_simplify(A ), {
# en_XX, A, test, EOS
'input_ids': [[128_022, 58, 4_183, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 128_006,
}, )
| 28 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ : Optional[Any] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
def __init__( self , UpperCAmelCase , UpperCAmelCase=7 , UpperCAmelCase=3 , UpperCAmelCase=18 , UpperCAmelCase=30 , UpperCAmelCase=400 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=[0.5, 0.5, 0.5] , UpperCAmelCase=[0.5, 0.5, 0.5] , ):
lowerCamelCase_ = size if size is not None else {'''shortest_edge''': 18}
lowerCamelCase_ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = image_size
lowerCamelCase_ = min_resolution
lowerCamelCase_ = max_resolution
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = do_center_crop
lowerCamelCase_ = crop_size
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean
lowerCamelCase_ = image_std
def UpperCAmelCase__ ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
a__: Tuple = LevitImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = LevitImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(UpperCAmelCase , '''size''' ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCamelCase_ = image_processing(UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase__ ( self ):
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCamelCase_ = image_processing(UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCAmelCase__ ( self ):
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCamelCase_ = image_processing(UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 29 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
UpperCAmelCase_ : Optional[Any] = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def __SCREAMING_SNAKE_CASE ( a__ : str=True ) -> List[Any]:
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_lowercase ) )
class lowerCamelCase_ ( _lowercase ):
_lowercase : Optional[int] = None
_lowercase : str = None
def lowerCAmelCase_ ( self : Dict , __A : Optional[int] , __A : Optional[Any] ):
with TemporaryDirectory() as tmp_dir:
__A : List[Any] = dataset_module_factory(__A , cache_dir=__A )
__A : Tuple = import_main_class(dataset_module.module_path , dataset=__A )
__A : DatasetBuilder = builder_cls(
cache_dir=__A , config_name=__A , hash=dataset_module.hash , )
__A : List[Any] = """/""".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=__A ).replace(os.sep , """/""" ),
config.DATASET_INFO_FILENAME,
] )
__A : Union[str, Any] = cached_path(__A , cache_dir=__A )
self.assertTrue(os.path.exists(__A ) )
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( a__ : Dict ) -> Optional[Any]:
__A : Optional[Any] = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
__A : Union[str, Any] = dataset_module_factory("""wikipedia""" ,cache_dir=a__ )
__A : List[Any] = import_main_class(dataset_module.module_path )
__A : DatasetBuilder = builder_cls(
cache_dir=a__ ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
__A : Any = None
builder_instance.download_and_prepare()
__A : Union[str, Any] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( a__ : List[str] ) -> List[str]:
__A : Tuple = dataset_module_factory("""wikipedia""" ,cache_dir=a__ )
__A : str = import_main_class(dataset_module.module_path ,dataset=a__ )
__A : DatasetBuilder = builder_cls(
cache_dir=a__ ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
__A : Optional[int] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(a__ ,a__ )
assert "train" in ds
assert isinstance(ds["""train"""] ,a__ )
assert next(iter(ds["""train"""] ) )
| 17 | 0 |
from ..utils import DummyObject, requires_backends
class __a( metaclass=_a ):
"""simple docstring"""
lowerCAmelCase = ['''flax''', '''transformers''']
def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> List[Any]:
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def a__ ( cls ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def a__ ( cls ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(cls ,['''flax''', '''transformers'''] )
class __a( metaclass=_a ):
"""simple docstring"""
lowerCAmelCase = ['''flax''', '''transformers''']
def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def a__ ( cls ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> str:
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def a__ ( cls ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(cls ,['''flax''', '''transformers'''] )
class __a( metaclass=_a ):
"""simple docstring"""
lowerCAmelCase = ['''flax''', '''transformers''']
def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def a__ ( cls ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def a__ ( cls ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
requires_backends(cls ,['''flax''', '''transformers'''] )
class __a( metaclass=_a ):
"""simple docstring"""
lowerCAmelCase = ['''flax''', '''transformers''']
def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Any:
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def a__ ( cls ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def a__ ( cls ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> List[str]:
requires_backends(cls ,['''flax''', '''transformers'''] ) | 30 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : Optional[int] , __A : Union[str, Any] , __A : int=7 , __A : int=3 , __A : int=30 , __A : Dict=400 , __A : str=True , __A : str=None , __A : str=True , __A : Optional[int]=[0.5, 0.5, 0.5] , __A : List[str]=[0.5, 0.5, 0.5] , __A : Optional[Any]=True , __A : int=1 / 255 , __A : List[Any]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
__A : Union[str, Any] = parent
__A : Union[str, Any] = batch_size
__A : Union[str, Any] = num_channels
__A : Optional[Any] = min_resolution
__A : Union[str, Any] = max_resolution
__A : Any = do_resize
__A : Union[str, Any] = size
__A : Optional[int] = do_normalize
__A : Dict = image_mean
__A : Optional[int] = image_std
__A : Tuple = do_rescale
__A : Optional[Any] = rescale_factor
__A : Tuple = do_pad
def lowerCAmelCase_ ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase_ ( self : Optional[Any] , __A : Optional[int] , __A : Dict=False ):
if not batched:
__A : Union[str, Any] = image_inputs[0]
if isinstance(__A , Image.Image ):
__A , __A : Union[str, Any] = image.size
else:
__A , __A : Optional[int] = image.shape[1], image.shape[2]
if w < h:
__A : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
__A : Dict = self.size["""shortest_edge"""]
elif w > h:
__A : Optional[Any] = self.size["""shortest_edge"""]
__A : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
__A : Union[str, Any] = self.size["""shortest_edge"""]
__A : str = self.size["""shortest_edge"""]
else:
__A : Any = []
for image in image_inputs:
__A , __A : List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__A : Tuple = max(__A , key=lambda __A : item[0] )[0]
__A : Union[str, Any] = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase_ ( _lowercase , unittest.TestCase ):
_lowercase : Tuple = DetaImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : Optional[Any] ):
__A : Tuple = DetaImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : List[str] ):
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , """image_mean""" ) )
self.assertTrue(hasattr(__A , """image_std""" ) )
self.assertTrue(hasattr(__A , """do_normalize""" ) )
self.assertTrue(hasattr(__A , """do_resize""" ) )
self.assertTrue(hasattr(__A , """do_rescale""" ) )
self.assertTrue(hasattr(__A , """do_pad""" ) )
self.assertTrue(hasattr(__A , """size""" ) )
def lowerCAmelCase_ ( self : Any ):
__A : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , __A )
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A , __A : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
__A : List[str] = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : int = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Dict ):
# Initialize image_processing
__A : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
__A : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Tuple = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Any = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase_ ( self : Tuple ):
# prepare image and target
__A : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__A : Any = json.loads(f.read() )
__A : int = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__A : List[str] = DetaImageProcessor()
__A : List[str] = image_processing(images=__A , annotations=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Tuple = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : List[str] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify orig_size
__A : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
# prepare image, target and masks_path
__A : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__A : Tuple = json.loads(f.read() )
__A : Optional[int] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__A : Any = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__A : Any = DetaImageProcessor(format="""coco_panoptic""" )
__A : int = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Optional[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : Any = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : Optional[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify masks
__A : List[str] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __A )
# verify orig_size
__A : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
| 17 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : Optional[Any] = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : List[str] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 31 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
__A : List[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=a__ ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=a__ ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=a__ )
return parser.parse_args()
def __SCREAMING_SNAKE_CASE ( ) -> str:
__A : Union[str, Any] = parse_args()
# Import training_script as a module.
__A : List[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__A : str = script_fpath.stem
__A : int = importlib.import_module(a__ )
# Patch sys.argv
__A : List[str] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 17 | 0 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def A__ ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
_UpperCAmelCase = load_file(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
_UpperCAmelCase = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' )
_UpperCAmelCase = pipeline.text_encoder
else:
_UpperCAmelCase = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' )
_UpperCAmelCase = pipeline.unet
# find the target layer
_UpperCAmelCase = layer_infos.pop(0 )
while len(SCREAMING_SNAKE_CASE_ ) > -1:
try:
_UpperCAmelCase = curr_layer.__getattr__(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
_UpperCAmelCase = layer_infos.pop(0 )
elif len(SCREAMING_SNAKE_CASE_ ) == 0:
break
except Exception:
if len(SCREAMING_SNAKE_CASE_ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
_UpperCAmelCase = layer_infos.pop(0 )
_UpperCAmelCase = []
if "lora_down" in key:
pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) )
pair_keys.append(SCREAMING_SNAKE_CASE_ )
else:
pair_keys.append(SCREAMING_SNAKE_CASE_ )
pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
_UpperCAmelCase = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
_UpperCAmelCase = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).unsqueeze(2 ).unsqueeze(3 )
else:
_UpperCAmelCase = state_dict[pair_keys[0]].to(torch.floataa )
_UpperCAmelCase = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# update visited list
for item in pair_keys:
visited.append(SCREAMING_SNAKE_CASE_ )
return pipeline
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
)
parser.add_argument(
"--lora_prefix_text_encoder",
default="lora_te",
type=str,
help="The prefix of text encoder weight in safetensors",
)
parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
parser.add_argument(
"--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
)
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = args.base_model_path
UpperCAmelCase_ = args.checkpoint_path
UpperCAmelCase_ = args.dump_path
UpperCAmelCase_ = args.lora_prefix_unet
UpperCAmelCase_ = args.lora_prefix_text_encoder
UpperCAmelCase_ = args.alpha
UpperCAmelCase_ = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
UpperCAmelCase_ = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 32 |
from collections.abc import Sequence
def __SCREAMING_SNAKE_CASE ( a__ : Sequence[float] ,a__ : float ) -> float:
return sum(c * (x**i) for i, c in enumerate(a__ ) )
def __SCREAMING_SNAKE_CASE ( a__ : Sequence[float] ,a__ : float ) -> float:
__A : Any = 0.0
for coeff in reversed(a__ ):
__A : List[str] = result * x + coeff
return result
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCAmelCase_ : str = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 17 | 0 |
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 33 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCamelCase_ ( _lowercase ):
_lowercase : Union[str, Any] = '''EncodecFeatureExtractor'''
_lowercase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self : List[Any] , __A : Any , __A : Tuple ):
super().__init__(__A , __A )
__A : Dict = self.feature_extractor
__A : List[str] = False
def lowerCAmelCase_ ( self : Union[str, Any] , __A : str=None , __A : Tuple=None , __A : Dict=True ):
return self.tokenizer.get_decoder_prompt_ids(task=__A , language=__A , no_timestamps=__A )
def __call__( self : Optional[Any] , *__A : Tuple , **__A : Tuple ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__A , **__A )
__A : str = kwargs.pop("""audio""" , __A )
__A : Optional[Any] = kwargs.pop("""sampling_rate""" , __A )
__A : int = kwargs.pop("""text""" , __A )
if len(__A ) > 0:
__A : int = args[0]
__A : Dict = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if text is not None:
__A : Dict = self.tokenizer(__A , **__A )
if audio is not None:
__A : Optional[int] = self.feature_extractor(__A , *__A , sampling_rate=__A , **__A )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__A : List[Any] = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
__A : int = audio_inputs["""padding_mask"""]
return inputs
def lowerCAmelCase_ ( self : List[str] , *__A : int , **__A : Tuple ):
__A : Optional[int] = kwargs.pop("""audio""" , __A )
__A : List[str] = kwargs.pop("""padding_mask""" , __A )
if len(__A ) > 0:
__A : Dict = args[0]
__A : Optional[int] = args[1:]
if audio_values is not None:
return self._decode_audio(__A , padding_mask=__A )
else:
return self.tokenizer.batch_decode(*__A , **__A )
def lowerCAmelCase_ ( self : Optional[Any] , *__A : Dict , **__A : Any ):
return self.tokenizer.decode(*__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : Union[str, Any] , __A : Optional = None ):
__A : List[str] = to_numpy(__A )
__A , __A , __A : Tuple = audio_values.shape
if padding_mask is None:
return list(__A )
__A : Union[str, Any] = to_numpy(__A )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__A : List[str] = seq_len - padding_mask.shape[-1]
__A : Tuple = 1 - self.feature_extractor.padding_value
__A : Optional[int] = np.pad(__A , ((0, 0), (0, difference)) , """constant""" , constant_values=__A )
__A : int = audio_values.tolist()
for i in range(__A ):
__A : str = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__A : List[Any] = sliced_audio.reshape(__A , -1 )
return audio_values
| 17 | 0 |
"""simple docstring"""
def __snake_case ( _lowercase = 100_0000 ):
"""simple docstring"""
UpperCamelCase = set(range(3 ,_lowercase ,2 ) )
primes.add(2 )
for p in range(3 ,_lowercase ,2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p ,_lowercase ,_lowercase ) ) )
UpperCamelCase = [float(_lowercase ) for n in range(limit + 1 )]
for p in primes:
for n in range(_lowercase ,limit + 1 ,_lowercase ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'{solution() = }') | 34 |
def __SCREAMING_SNAKE_CASE ( a__ : int ) -> int:
if not isinstance(a__ ,a__ ):
raise TypeError("""Input value must be an 'int' type""" )
__A : Union[str, Any] = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
a_ :Optional[int] = logging.get_logger(__name__)
a_ :Union[str, Any] = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Union[str, Any] = '''marian'''
lowerCamelCase : Tuple = ['''past_key_values''']
lowerCamelCase : Optional[Any] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Optional[Any] , _lowercase : str=5_81_01 , _lowercase : Union[str, Any]=None , _lowercase : Tuple=10_24 , _lowercase : List[Any]=12 , _lowercase : int=40_96 , _lowercase : int=16 , _lowercase : str=12 , _lowercase : List[str]=40_96 , _lowercase : Tuple=16 , _lowercase : List[Any]=0.0 , _lowercase : Any=0.0 , _lowercase : List[Any]=True , _lowercase : Dict=True , _lowercase : Union[str, Any]="gelu" , _lowercase : int=10_24 , _lowercase : Optional[Any]=0.1 , _lowercase : List[Any]=0.0 , _lowercase : Optional[int]=0.0 , _lowercase : str=0.02 , _lowercase : Tuple=5_81_00 , _lowercase : int=False , _lowercase : Any=5_81_00 , _lowercase : Tuple=0 , _lowercase : Tuple=0 , _lowercase : List[Any]=True , **_lowercase : int , ):
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Dict = decoder_vocab_size or vocab_size
SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : int = d_model
SCREAMING_SNAKE_CASE__ : Tuple = encoder_ffn_dim
SCREAMING_SNAKE_CASE__ : int = encoder_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = encoder_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : Tuple = decoder_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : int = dropout
SCREAMING_SNAKE_CASE__ : Optional[int] = attention_dropout
SCREAMING_SNAKE_CASE__ : List[str] = activation_dropout
SCREAMING_SNAKE_CASE__ : Union[str, Any] = activation_function
SCREAMING_SNAKE_CASE__ : Tuple = init_std
SCREAMING_SNAKE_CASE__ : Optional[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE__ : List[str] = decoder_layerdrop
SCREAMING_SNAKE_CASE__ : Tuple = use_cache
SCREAMING_SNAKE_CASE__ : str = encoder_layers
SCREAMING_SNAKE_CASE__ : Any = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE__ : str = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , forced_eos_token_id=_lowercase , **_lowercase , )
class lowercase ( _UpperCAmelCase ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowercase__ ( self : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE__ : Any = {0: '''batch'''}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_lowercase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE__ : Optional[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.num_layers
for i in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE__ : Any = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE__ : int = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowercase__ ( self : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().outputs
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = super(_lowercase , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self.num_layers
for i in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE__ : Any = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def lowercase__ ( self : Optional[Any] , _lowercase : PreTrainedTokenizer , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE__ : str = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# Generate decoder inputs
SCREAMING_SNAKE_CASE__ : str = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE__ : List[str] = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Dict = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE__ : Optional[int] = dict(**_lowercase , **_lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = common_inputs['''input_ids'''].shape
SCREAMING_SNAKE_CASE__ : str = common_inputs['''decoder_input_ids'''].shape[1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_seq_length + 3
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ : str = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(_lowercase , _lowercase )] , dim=1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.num_layers
SCREAMING_SNAKE_CASE__ : str = min(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = max(_lowercase , _lowercase ) - min_num_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(_lowercase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE__ : List[Any] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(_lowercase , _lowercase ):
common_inputs["past_key_values"].append((torch.zeros(_lowercase ), torch.zeros(_lowercase )) )
return common_inputs
def lowercase__ ( self : int , _lowercase : PreTrainedTokenizer , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE__ : List[str] = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE__ : Any = seqlen + 2
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self.num_layers
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.num_attention_heads
SCREAMING_SNAKE_CASE__ : str = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ : Any = common_inputs['''attention_mask'''].dtype
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(_lowercase , _lowercase , dtype=_lowercase )] , dim=1 )
SCREAMING_SNAKE_CASE__ : List[str] = [
(torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(_lowercase )
]
return common_inputs
def lowercase__ ( self : Optional[Any] , _lowercase : PreTrainedTokenizer , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ : str = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ : str = tokenizer.num_special_tokens_to_add(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowercase )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE__ : List[Any] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dict(tokenizer(_lowercase , return_tensors=_lowercase ) )
return common_inputs
def lowercase__ ( self : List[str] , _lowercase : PreTrainedTokenizer , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = self._generate_dummy_inputs_for_causal_lm(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
return common_inputs
def lowercase__ ( self : str , _lowercase : Union[str, Any] , _lowercase : Any , _lowercase : Optional[int] , _lowercase : Union[str, Any] ):
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super()._flatten_past_key_values_(_lowercase , _lowercase , _lowercase , _lowercase )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = super(_lowercase , self )._flatten_past_key_values_(
_lowercase , _lowercase , _lowercase , _lowercase )
@property
def lowercase__ ( self : Dict ):
return 1E-4
| 35 |
UpperCAmelCase_ : dict[tuple[int, int, int], int] = {}
def __SCREAMING_SNAKE_CASE ( a__ : int ,a__ : int ,a__ : int ) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
__A : List[Any] = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
__A : Dict = _calculate(days - 1 ,a__ ,late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
__A : List[str] = _calculate(days - 1 ,absent + 1 ,0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
__A : int = _calculate(days - 1 ,a__ ,0 )
__A : Optional[int] = state_late + state_absent + state_ontime
__A : Tuple = prizestrings
return prizestrings
def __SCREAMING_SNAKE_CASE ( a__ : int = 30 ) -> int:
return _calculate(a__ ,absent=0 ,late=0 )
if __name__ == "__main__":
print(solution())
| 17 | 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Dict = ''''''
__lowerCamelCase : Union[str, Any] = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(self ,**SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = repo_info
snake_case : Dict = token
snake_case : Any = None
def snake_case_ ( self ):
'''simple docstring'''
if self.dir_cache is None:
snake_case : str = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
snake_case : Union[str, Any] = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(SCREAMING_SNAKE_CASE_ ): {"""name""": str(SCREAMING_SNAKE_CASE_ ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = "rb" ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
if not isinstance(self.repo_info ,SCREAMING_SNAKE_CASE_ ):
raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
snake_case : Tuple = hf_hub_url(self.repo_info.id ,SCREAMING_SNAKE_CASE_ ,revision=self.repo_info.sha )
return fsspec.open(
SCREAMING_SNAKE_CASE_ ,mode=SCREAMING_SNAKE_CASE_ ,headers=get_authentication_headers_for_url(SCREAMING_SNAKE_CASE_ ,use_auth_token=self.token ) ,client_kwargs={"""trust_env""": True} ,).open()
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
self._get_dirs()
snake_case : List[Any] = self._strip_protocol(SCREAMING_SNAKE_CASE_ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=False ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
self._get_dirs()
snake_case : List[str] = PurePosixPath(path.strip("""/""" ) )
snake_case : Optional[int] = {}
for p, f in self.dir_cache.items():
snake_case : List[str] = PurePosixPath(p.strip("""/""" ) )
snake_case : int = p.parent
if root == path:
snake_case : Any = f
snake_case : Optional[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 36 |
class lowerCamelCase_ :
def __init__( self : Dict , __A : int , __A : Tuple , __A : List[Any] ):
__A : Optional[int] = None
__A : Any = None
__A : int = graph
self._normalize_graph(__A , __A )
__A : str = len(__A )
__A : Optional[int] = None
def lowerCAmelCase_ ( self : int , __A : Any , __A : Optional[Any] ):
if sources is int:
__A : Dict = [sources]
if sinks is int:
__A : Optional[int] = [sinks]
if len(__A ) == 0 or len(__A ) == 0:
return
__A : str = sources[0]
__A : Dict = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(__A ) > 1 or len(__A ) > 1:
__A : Optional[Any] = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
__A : List[Any] = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
__A : str = max_input_flow
__A : Union[str, Any] = 0
__A : Any = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
__A : int = max_input_flow
__A : Optional[Any] = size - 1
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def lowerCAmelCase_ ( self : Optional[Any] , __A : Dict ):
__A : Dict = algorithm(self )
class lowerCamelCase_ :
def __init__( self : Union[str, Any] , __A : str ):
__A : Any = flow_network
__A : int = flow_network.verticesCount
__A : List[Any] = flow_network.sourceIndex
__A : Union[str, Any] = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
__A : Optional[int] = flow_network.graph
__A : str = False
def lowerCAmelCase_ ( self : List[Any] ):
if not self.executed:
self._algorithm()
__A : Any = True
def lowerCAmelCase_ ( self : List[str] ):
pass
class lowerCamelCase_ ( _lowercase ):
def __init__( self : Any , __A : List[str] ):
super().__init__(__A )
# use this to save your result
__A : str = -1
def lowerCAmelCase_ ( self : Any ):
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class lowerCamelCase_ ( _lowercase ):
def __init__( self : List[Any] , __A : Dict ):
super().__init__(__A )
__A : Tuple = [[0] * self.verticies_count for i in range(self.verticies_count )]
__A : Optional[Any] = [0] * self.verticies_count
__A : Union[str, Any] = [0] * self.verticies_count
def lowerCAmelCase_ ( self : int ):
__A : Optional[int] = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
__A : List[str] = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
__A : Dict = 0
while i < len(__A ):
__A : List[Any] = vertices_list[i]
__A : Optional[Any] = self.heights[vertex_index]
self.process_vertex(__A )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(__A ) )
__A : Any = 0
else:
i += 1
__A : Optional[int] = sum(self.preflow[self.source_index] )
def lowerCAmelCase_ ( self : Optional[Any] , __A : str ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__A , __A )
self.relabel(__A )
def lowerCAmelCase_ ( self : Dict , __A : List[str] , __A : Optional[Any] ):
__A : Union[str, Any] = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def lowerCAmelCase_ ( self : Optional[Any] , __A : Tuple ):
__A : Tuple = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
__A : Dict = self.heights[to_index]
if min_height is not None:
__A : Optional[int] = min_height + 1
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = [0]
UpperCAmelCase_ : Dict = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
UpperCAmelCase_ : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
UpperCAmelCase_ : str = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
UpperCAmelCase_ : int = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 17 | 0 |
from __future__ import annotations
from collections import namedtuple
def UpperCamelCase_ ( __a , __a , __a ) -> tuple:
a__ : Union[str, Any] = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 37 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ) -> str | Literal[False]:
__A : Tuple = list(a__ )
__A : Optional[int] = list(a__ )
__A : int = 0
for i in range(len(a__ ) ):
if lista[i] != lista[i]:
count += 1
__A : int = """_"""
if count > 1:
return False
else:
return "".join(a__ )
def __SCREAMING_SNAKE_CASE ( a__ : list[str] ) -> list[str]:
__A : Optional[Any] = []
while True:
__A : Tuple = ["""$"""] * len(a__ )
__A : Union[str, Any] = []
for i in range(len(a__ ) ):
for j in range(i + 1 ,len(a__ ) ):
__A : int = compare_string(binary[i] ,binary[j] )
if k is False:
__A : List[str] = """*"""
__A : Any = """*"""
temp.append("""X""" )
for i in range(len(a__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(a__ ) == 0:
return pi
__A : Optional[Any] = list(set(a__ ) )
def __SCREAMING_SNAKE_CASE ( a__ : int ,a__ : Sequence[float] ) -> list[str]:
__A : List[str] = []
for minterm in minterms:
__A : List[Any] = """"""
for _ in range(a__ ):
__A : Union[str, Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(a__ )
return temp
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ,a__ : int ) -> bool:
__A : Optional[Any] = list(a__ )
__A : Tuple = list(a__ )
__A : Any = 0
for i in range(len(a__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def __SCREAMING_SNAKE_CASE ( a__ : list[list[int]] ,a__ : list[str] ) -> list[str]:
__A : Optional[int] = []
__A : Tuple = [0] * len(a__ )
for i in range(len(chart[0] ) ):
__A : str = 0
__A : Any = -1
for j in range(len(a__ ) ):
if chart[j][i] == 1:
count += 1
__A : Optional[Any] = j
if count == 1:
__A : int = 1
for i in range(len(a__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(a__ ) ):
__A : List[str] = 0
temp.append(prime_implicants[i] )
while True:
__A : Optional[Any] = 0
__A : Any = -1
__A : int = 0
for i in range(len(a__ ) ):
__A : List[Any] = chart[i].count(1 )
if count_n > max_n:
__A : Dict = count_n
__A : Tuple = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(a__ ) ):
__A : Union[str, Any] = 0
def __SCREAMING_SNAKE_CASE ( a__ : list[str] ,a__ : list[str] ) -> list[list[int]]:
__A : Any = [[0 for x in range(len(a__ ) )] for x in range(len(a__ ) )]
for i in range(len(a__ ) ):
__A : List[Any] = prime_implicants[i].count("""_""" )
for j in range(len(a__ ) ):
if is_for_table(prime_implicants[i] ,binary[j] ,a__ ):
__A : Union[str, Any] = 1
return chart
def __SCREAMING_SNAKE_CASE ( ) -> None:
__A : Any = int(input("""Enter the no. of variables\n""" ) )
__A : List[str] = [
float(a__ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
__A : Dict = decimal_to_binary(a__ ,a__ )
__A : Union[str, Any] = check(a__ )
print("""Prime Implicants are:""" )
print(a__ )
__A : Optional[Any] = prime_implicant_chart(a__ ,a__ )
__A : Any = selection(a__ ,a__ )
print("""Essential Prime Implicants are:""" )
print(a__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 17 | 0 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A_ : Dict = logging.get_logger(__name__)
A_ : Any = {
"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''detr'''
lowerCamelCase__ = ['''past_key_values''']
lowerCamelCase__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=1_0_0 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=2_0_4_8 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=2_0_4_8 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="relu" , __SCREAMING_SNAKE_CASE=2_5_6 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="sine" , __SCREAMING_SNAKE_CASE="resnet50" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.1 , **__SCREAMING_SNAKE_CASE , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case__ : Optional[int] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = backbone_config.get("""model_type""" )
snake_case__ : List[Any] = CONFIG_MAPPING[backbone_model_type]
snake_case__ : Dict = config_class.from_dict(__SCREAMING_SNAKE_CASE )
# set timm attributes to None
snake_case__ , snake_case__ , snake_case__ : int = None, None, None
snake_case__ : List[Any] = use_timm_backbone
snake_case__ : Optional[int] = backbone_config
snake_case__ : Tuple = num_channels
snake_case__ : str = num_queries
snake_case__ : Optional[int] = d_model
snake_case__ : Tuple = encoder_ffn_dim
snake_case__ : Union[str, Any] = encoder_layers
snake_case__ : List[str] = encoder_attention_heads
snake_case__ : Dict = decoder_ffn_dim
snake_case__ : Any = decoder_layers
snake_case__ : Optional[int] = decoder_attention_heads
snake_case__ : List[Any] = dropout
snake_case__ : List[str] = attention_dropout
snake_case__ : Tuple = activation_dropout
snake_case__ : List[str] = activation_function
snake_case__ : List[str] = init_std
snake_case__ : Optional[Any] = init_xavier_std
snake_case__ : int = encoder_layerdrop
snake_case__ : Any = decoder_layerdrop
snake_case__ : int = encoder_layers
snake_case__ : Dict = auxiliary_loss
snake_case__ : Dict = position_embedding_type
snake_case__ : Tuple = backbone
snake_case__ : str = use_pretrained_backbone
snake_case__ : Union[str, Any] = dilation
# Hungarian matcher
snake_case__ : int = class_cost
snake_case__ : Optional[int] = bbox_cost
snake_case__ : Dict = giou_cost
# Loss coefficients
snake_case__ : List[str] = mask_loss_coefficient
snake_case__ : Optional[Any] = dice_loss_coefficient
snake_case__ : Optional[Any] = bbox_loss_coefficient
snake_case__ : List[Any] = giou_loss_coefficient
snake_case__ : Optional[int] = eos_coefficient
super().__init__(is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
def __UpperCamelCase ( self ):
return self.encoder_attention_heads
@property
def __UpperCamelCase ( self ):
return self.d_model
@classmethod
def __UpperCamelCase ( cls , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
return cls(backbone_config=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
snake_case__ : Union[str, Any] = self.backbone_config.to_dict()
snake_case__ : Tuple = self.__class__.model_type
return output
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-5
@property
def __UpperCamelCase ( self ):
return 1_2
| 38 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( a__ : List[str] ,a__ : Dict ,a__ : Union[str, Any] ,a__ : Any ) -> Optional[int]: # noqa: E741
while r - l > 1:
__A : Any = (l + r) // 2
if v[m] >= key:
__A : Optional[int] = m
else:
__A : List[Any] = m # noqa: E741
return r
def __SCREAMING_SNAKE_CASE ( a__ : list[int] ) -> int:
if len(a__ ) == 0:
return 0
__A : str = [0] * len(a__ )
__A : List[str] = 1
__A : List[Any] = v[0]
for i in range(1 ,len(a__ ) ):
if v[i] < tail[0]:
__A : int = v[i]
elif v[i] > tail[length - 1]:
__A : Union[str, Any] = v[i]
length += 1
else:
__A : Any = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class snake_case_ :
'''simple docstring'''
def __init__( self : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any]=1_3 , _UpperCamelCase : Dict=7 , _UpperCamelCase : List[str]=True , _UpperCamelCase : Any=True , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : List[Any]=True , _UpperCamelCase : str=9_9 , _UpperCamelCase : str=3_2 , _UpperCamelCase : Union[str, Any]=2 , _UpperCamelCase : Optional[Any]=4 , _UpperCamelCase : Any=3_7 , _UpperCamelCase : Tuple="gelu" , _UpperCamelCase : Optional[Any]=0.1 , _UpperCamelCase : List[Any]=0.1 , _UpperCamelCase : Union[str, Any]=5_1_2 , _UpperCamelCase : Optional[int]=1_6 , _UpperCamelCase : Tuple=2 , _UpperCamelCase : Optional[Any]=0.02 , _UpperCamelCase : Union[str, Any]=3 , _UpperCamelCase : Optional[Any]=4 , _UpperCamelCase : List[str]=None , ) ->Tuple:
snake_case_ = parent
snake_case_ = 1_3
snake_case_ = 7
snake_case_ = True
snake_case_ = True
snake_case_ = True
snake_case_ = True
snake_case_ = 9_9
snake_case_ = 3_8_4
snake_case_ = 2
snake_case_ = 4
snake_case_ = 3_7
snake_case_ = '''gelu'''
snake_case_ = 0.1
snake_case_ = 0.1
snake_case_ = 5_1_2
snake_case_ = 1_6
snake_case_ = 2
snake_case_ = 0.02
snake_case_ = 3
snake_case_ = 4
snake_case_ = 1_2_8
snake_case_ = 2
snake_case_ = 9
snake_case_ = 1
snake_case_ = None
def snake_case__( self : Optional[Any] ) ->Optional[int]:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__( self : Optional[int] , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : int ) ->Optional[int]:
snake_case_ = TFConvBertModel(config=_UpperCamelCase )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = [input_ids, input_mask]
snake_case_ = model(_UpperCamelCase )
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__( self : str , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] ) ->Optional[int]:
snake_case_ = TFConvBertForMaskedLM(config=_UpperCamelCase )
snake_case_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__( self : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : List[Any] ) ->Dict:
snake_case_ = self.num_labels
snake_case_ = TFConvBertForSequenceClassification(config=_UpperCamelCase )
snake_case_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__( self : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : List[Any] ) ->str:
snake_case_ = self.num_choices
snake_case_ = TFConvBertForMultipleChoice(config=_UpperCamelCase )
snake_case_ = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case_ = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case_ = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case_ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__( self : Any , _UpperCamelCase : int , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Optional[int] ) ->str:
snake_case_ = self.num_labels
snake_case_ = TFConvBertForTokenClassification(config=_UpperCamelCase )
snake_case_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__( self : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Dict , _UpperCamelCase : Dict , _UpperCamelCase : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] ) ->Tuple:
snake_case_ = TFConvBertForQuestionAnswering(config=_UpperCamelCase )
snake_case_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
snake_case_ = model(_UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__( self : List[str] ) ->List[Any]:
snake_case_ = self.prepare_config_and_inputs()
(
(
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
),
) = config_and_inputs
snake_case_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class snake_case_ ( __A , __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE : Dict = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : int = False
def snake_case__( self : str ) ->Union[str, Any]:
snake_case_ = TFConvBertModelTester(self )
snake_case_ = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 )
def snake_case__( self : List[str] ) ->Optional[Any]:
self.config_tester.run_common_tests()
def snake_case__( self : str ) ->Optional[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def snake_case__( self : Optional[Any] ) ->List[str]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCamelCase )
def snake_case__( self : str ) ->Dict:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCamelCase )
def snake_case__( self : int ) ->str:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
def snake_case__( self : Optional[int] ) ->str:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCamelCase )
def snake_case__( self : str ) ->Union[str, Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
@slow
def snake_case__( self : str ) ->str:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = True
snake_case_ = True
if hasattr(_UpperCamelCase , '''use_cache''' ):
snake_case_ = True
snake_case_ = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
snake_case_ = getattr(self.model_tester , '''key_length''' , _UpperCamelCase )
for model_class in self.all_model_classes:
snake_case_ = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase )
snake_case_ = model_class(_UpperCamelCase )
snake_case_ = len(model(_UpperCamelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCamelCase , saved_model=_UpperCamelCase )
snake_case_ = os.path.join(_UpperCamelCase , '''saved_model''' , '''1''' )
snake_case_ = tf.keras.models.load_model(_UpperCamelCase )
snake_case_ = model(_UpperCamelCase )
if self.is_encoder_decoder:
snake_case_ = outputs['''encoder_hidden_states''']
snake_case_ = outputs['''encoder_attentions''']
else:
snake_case_ = outputs['''hidden_states''']
snake_case_ = outputs['''attentions''']
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
snake_case_ = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def snake_case__( self : Union[str, Any] ) ->Tuple:
snake_case_ = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
self.assertIsNotNone(_UpperCamelCase )
def snake_case__( self : List[str] ) ->Any:
snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = True
snake_case_ = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length )
snake_case_ = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
snake_case_ = getattr(self.model_tester , '''key_length''' , _UpperCamelCase )
snake_case_ = getattr(self.model_tester , '''key_length''' , _UpperCamelCase )
def check_decoder_attentions_output(_UpperCamelCase : List[Any] ):
snake_case_ = len(_UpperCamelCase )
self.assertEqual(out_len % 2 , 0 )
snake_case_ = outputs.decoder_attentions
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCamelCase : Dict ):
snake_case_ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
snake_case_ = True
snake_case_ = False
snake_case_ = model_class(_UpperCamelCase )
snake_case_ = model(self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
snake_case_ = len(_UpperCamelCase )
self.assertEqual(config.output_hidden_states , _UpperCamelCase )
check_encoder_attentions_output(_UpperCamelCase )
if self.is_encoder_decoder:
snake_case_ = model_class(_UpperCamelCase )
snake_case_ = model(self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCamelCase )
check_decoder_attentions_output(_UpperCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
snake_case_ = True
snake_case_ = model_class(_UpperCamelCase )
snake_case_ = model(self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCamelCase )
check_encoder_attentions_output(_UpperCamelCase )
# Check attention is always last and order is fine
snake_case_ = True
snake_case_ = True
snake_case_ = model_class(_UpperCamelCase )
snake_case_ = model(self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCamelCase ) )
self.assertEqual(model.config.output_hidden_states , _UpperCamelCase )
check_encoder_attentions_output(_UpperCamelCase )
@require_tf
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__( self : Any ) ->int:
snake_case_ = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
snake_case_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
snake_case_ = model(_UpperCamelCase )[0]
snake_case_ = [1, 6, 7_6_8]
self.assertEqual(output.shape , _UpperCamelCase )
snake_case_ = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) | 39 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 17 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 40 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCamelCase_ ( _lowercase ):
_lowercase : Dict = '''unispeech'''
def __init__( self : str , __A : Tuple=32 , __A : List[str]=768 , __A : Dict=12 , __A : Union[str, Any]=12 , __A : Tuple=3072 , __A : Any="gelu" , __A : int=0.1 , __A : Optional[int]=0.1 , __A : List[Any]=0.1 , __A : Any=0.0 , __A : List[str]=0.0 , __A : int=0.1 , __A : List[Any]=0.1 , __A : List[str]=0.0_2 , __A : List[str]=1e-5 , __A : List[Any]="group" , __A : int="gelu" , __A : Any=(512, 512, 512, 512, 512, 512, 512) , __A : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , __A : Tuple=(10, 3, 3, 3, 3, 2, 2) , __A : Optional[int]=False , __A : Any=128 , __A : Union[str, Any]=16 , __A : Optional[Any]=False , __A : str=True , __A : Dict=0.0_5 , __A : Optional[Any]=10 , __A : Dict=2 , __A : int=0.0 , __A : List[str]=10 , __A : str=0 , __A : List[str]=320 , __A : List[Any]=2 , __A : Tuple=0.1 , __A : Optional[int]=100 , __A : Any=256 , __A : Dict=256 , __A : Tuple=0.1 , __A : List[str]="mean" , __A : int=False , __A : List[str]=False , __A : List[Any]=256 , __A : str=80 , __A : Tuple=0 , __A : Tuple=1 , __A : int=2 , __A : Dict=0.5 , **__A : List[Any] , ):
super().__init__(**__A , pad_token_id=__A , bos_token_id=__A , eos_token_id=__A )
__A : Dict = hidden_size
__A : Optional[Any] = feat_extract_norm
__A : List[Any] = feat_extract_activation
__A : str = list(__A )
__A : Optional[Any] = list(__A )
__A : Optional[int] = list(__A )
__A : List[Any] = conv_bias
__A : Optional[int] = num_conv_pos_embeddings
__A : List[Any] = num_conv_pos_embedding_groups
__A : int = len(self.conv_dim )
__A : Optional[Any] = num_hidden_layers
__A : List[str] = intermediate_size
__A : Union[str, Any] = hidden_act
__A : Optional[int] = num_attention_heads
__A : Tuple = hidden_dropout
__A : Optional[Any] = attention_dropout
__A : Union[str, Any] = activation_dropout
__A : Dict = feat_proj_dropout
__A : Optional[int] = final_dropout
__A : Dict = layerdrop
__A : Optional[int] = layer_norm_eps
__A : Optional[Any] = initializer_range
__A : Optional[int] = num_ctc_classes
__A : Dict = vocab_size
__A : List[str] = do_stable_layer_norm
__A : Tuple = use_weighted_layer_sum
__A : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__A : Tuple = apply_spec_augment
__A : Union[str, Any] = mask_time_prob
__A : Optional[Any] = mask_time_length
__A : List[Any] = mask_time_min_masks
__A : List[Any] = mask_feature_prob
__A : Any = mask_feature_length
__A : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__A : Any = num_codevectors_per_group
__A : Tuple = num_codevector_groups
__A : List[str] = contrastive_logits_temperature
__A : Optional[int] = feat_quantizer_dropout
__A : int = num_negatives
__A : List[str] = codevector_dim
__A : int = proj_codevector_dim
__A : Union[str, Any] = diversity_loss_weight
# ctc loss
__A : List[str] = ctc_loss_reduction
__A : Any = ctc_zero_infinity
# pretraining loss
__A : Union[str, Any] = replace_prob
@property
def lowerCAmelCase_ ( self : int ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 17 | 0 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
lowerCAmelCase__ = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Path ,lowercase__ : Union[str, None] = None ,lowercase__ : Union[List[str], None] = None ,lowercase__ : Union[str, List[str], None] = None ,lowercase__ : bool = True ,):
__lowercase = [file for file in os.listdir(lowercase__ ) if os.path.isfile(os.path.join(lowercase__ ,lowercase__ ) )]
if identifier is not None:
__lowercase = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase__ ,lowercase__ ):
for n_ in n_identifier:
__lowercase = [file for file in files if n_ not in file]
else:
__lowercase = [file for file in files if n_identifier not in file]
__lowercase = ignore_files or []
ignore_files.append('''__init__.py''' )
__lowercase = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('''Testing''' ,lowercase__ )
if only_modules:
__lowercase = file.split('''.''' )[0]
try:
__lowercase = getattr(lowercase__ ,lowercase__ )
__lowercase = doctest.DocTestSuite(lowercase__ )
__lowercase = unittest.TextTestRunner().run(lowercase__ )
self.assertIs(len(result.failures ) ,0 )
except AttributeError:
logger.info(F"{module_identifier} is not a module." )
else:
__lowercase = doctest.testfile(str('''..''' / directory / file ) ,optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed ,0 )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = Path('''src/transformers''' )
__lowercase = '''modeling'''
__lowercase = [
'''modeling_ctrl.py''',
'''modeling_tf_ctrl.py''',
]
self.analyze_directory(lowercase__ ,identifier=lowercase__ ,ignore_files=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = Path('''src/transformers''' )
__lowercase = '''tokenization'''
self.analyze_directory(lowercase__ ,identifier=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = Path('''src/transformers''' )
__lowercase = '''configuration'''
self.analyze_directory(lowercase__ ,identifier=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = Path('''src/transformers''' )
__lowercase = ['''configuration''', '''modeling''', '''tokenization''']
self.analyze_directory(lowercase__ ,n_identifier=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = Path('''docs/source''' )
__lowercase = ['''favicon.ico''']
self.analyze_directory(lowercase__ ,ignore_files=lowercase__ ,only_modules=lowercase__ )
| 41 |
import fire
from utils import calculate_rouge, save_json
def __SCREAMING_SNAKE_CASE ( a__ : Any ,a__ : Tuple ,a__ : Any=None ,**a__ : Dict ) -> Optional[Any]:
__A : int = [x.strip() for x in open(a__ ).readlines()]
__A : List[str] = [x.strip() for x in open(a__ ).readlines()][: len(a__ )]
__A : List[Any] = calculate_rouge(a__ ,a__ ,**a__ )
if save_path is not None:
save_json(a__ ,a__ ,indent=a__ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 17 | 0 |
'''simple docstring'''
def _UpperCamelCase ( __UpperCamelCase ) -> Dict:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def _UpperCamelCase ( __UpperCamelCase ) -> list[tuple[int, int]]:
lowerCamelCase_ = 0
lowerCamelCase_ = len(__UpperCamelCase ) # No of vertices in graph
lowerCamelCase_ = [0] * n
lowerCamelCase_ = [False] * n
def dfs(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ):
lowerCamelCase_ = True
lowerCamelCase_ = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,id_ )
lowerCamelCase_ = min(low[at] ,low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowerCamelCase_ = min(low[at] ,low[to] )
lowerCamelCase_ = []
for i in range(__UpperCamelCase ):
if not visited[i]:
dfs(__UpperCamelCase ,-1 ,__UpperCamelCase ,id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( a__ : Optional[Any] ,a__ : Union[str, Any] ,a__ : Optional[int] ) -> List[Any]:
# Initialise PyTorch model
__A : Dict = MobileBertConfig.from_json_file(a__ )
print(f"""Building PyTorch model from configuration: {config}""" )
__A : Tuple = MobileBertForPreTraining(a__ )
# Load weights from tf checkpoint
__A : Dict = load_tf_weights_in_mobilebert(a__ ,a__ ,a__ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() ,a__ )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 17 | 0 |
from __future__ import annotations
from collections.abc import Generator
def _a ( ):
"""simple docstring"""
lowercase__ = {}
lowercase__ = 2
while True:
lowercase__ = factor_map.pop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if factor:
lowercase__ = factor + prime
while x in factor_map:
x += factor
lowercase__ = factor
else:
lowercase__ = prime
yield prime
prime += 1
def _a ( SCREAMING_SNAKE_CASE = 1E10 ):
"""simple docstring"""
lowercase__ = sieve()
lowercase__ = 1
while True:
lowercase__ = next(SCREAMING_SNAKE_CASE )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(SCREAMING_SNAKE_CASE )
n += 2
if __name__ == "__main__":
print(solution())
| 43 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : int = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'glpn'
def __init__( self : Tuple,__A : Optional[int]=3,__A : Optional[int]=4,__A : str=[2, 2, 2, 2],__A : Union[str, Any]=[8, 4, 2, 1],__A : Tuple=[3_2, 6_4, 1_6_0, 2_5_6],__A : int=[7, 3, 3, 3],__A : str=[4, 2, 2, 2],__A : int=[1, 2, 5, 8],__A : List[Any]=[4, 4, 4, 4],__A : Optional[int]="gelu",__A : int=0.0,__A : Tuple=0.0,__A : Tuple=0.02,__A : Optional[int]=0.1,__A : Optional[int]=1e-6,__A : Optional[int]=6_4,__A : Optional[Any]=1_0,__A : Tuple=-1,**__A : List[str],):
super().__init__(**__A )
_lowerCamelCase : Tuple = num_channels
_lowerCamelCase : Union[str, Any] = num_encoder_blocks
_lowerCamelCase : Dict = depths
_lowerCamelCase : List[Any] = sr_ratios
_lowerCamelCase : str = hidden_sizes
_lowerCamelCase : Any = patch_sizes
_lowerCamelCase : Any = strides
_lowerCamelCase : Dict = mlp_ratios
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : List[Any] = hidden_act
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : Union[str, Any] = drop_path_rate
_lowerCamelCase : str = layer_norm_eps
_lowerCamelCase : Tuple = decoder_hidden_size
_lowerCamelCase : int = max_depth
_lowerCamelCase : Dict = head_in_index | 44 |
import math
class lowerCamelCase_ :
def __init__( self : Union[str, Any] , __A : List[str]=0 ): # a graph with Node 0,1,...,N-1
__A : List[str] = n
__A : List[str] = [
[math.inf for j in range(0 , __A )] for i in range(0 , __A )
] # adjacency matrix for weight
__A : str = [
[math.inf for j in range(0 , __A )] for i in range(0 , __A )
] # dp[i][j] stores minimum distance from i to j
def lowerCAmelCase_ ( self : str , __A : Union[str, Any] , __A : Any , __A : Optional[int] ):
__A : List[Any] = w
def lowerCAmelCase_ ( self : Union[str, Any] ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
__A : List[Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def lowerCAmelCase_ ( self : int , __A : List[str] , __A : List[str] ):
return self.dp[u][v]
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 17 | 0 |
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :str , lowerCamelCase__ :Dict ):
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
UpperCamelCase__ :Optional[Any] = deepcopy(lowerCamelCase__ )
elif os.path.exists(lowerCamelCase__ ):
with io.open(lowerCamelCase__ , """r""" , encoding="""utf-8""" ) as f:
UpperCamelCase__ :int = json.load(lowerCamelCase__ )
else:
try:
UpperCamelCase__ :int = baseaa.urlsafe_baadecode(lowerCamelCase__ ).decode("""utf-8""" )
UpperCamelCase__ :Union[str, Any] = json.loads(lowerCamelCase__ )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
UpperCamelCase__ :Tuple = config
self.set_stage_and_offload()
def __a ( self :Tuple ):
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
UpperCamelCase__ :Dict = self.get_value("""zero_optimization.stage""" , -1 )
# offload
UpperCamelCase__ :Tuple = False
if self.is_zeroa() or self.is_zeroa():
UpperCamelCase__ :Optional[int] = set(["""cpu""", """nvme"""] )
UpperCamelCase__ :Optional[Any] = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
UpperCamelCase__ :Optional[int] = True
def __a ( self :Union[str, Any] , lowerCamelCase__ :str ):
UpperCamelCase__ :Tuple = self.config
# find the config node of interest if it exists
UpperCamelCase__ :Any = ds_key_long.split(""".""" )
UpperCamelCase__ :int = nodes.pop()
for node in nodes:
UpperCamelCase__ :Dict = config.get(lowerCamelCase__ )
if config is None:
return None, ds_key
return config, ds_key
def __a ( self :Dict , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[Any]=None ):
UpperCamelCase__ , UpperCamelCase__ :Dict = self.find_config_node(lowerCamelCase__ )
if config is None:
return default
return config.get(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Tuple=False ):
UpperCamelCase__ :Tuple = self.config
# find the config node of interest if it exists
UpperCamelCase__ :Optional[Any] = ds_key_long.split(""".""" )
for node in nodes:
UpperCamelCase__ :Any = config
UpperCamelCase__ :Optional[Any] = config.get(lowerCamelCase__ )
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(lowerCamelCase__ )
def __a ( self :Any , lowerCamelCase__ :str ):
UpperCamelCase__ :Any = self.get_value(lowerCamelCase__ )
return False if value is None else bool(lowerCamelCase__ )
def __a ( self :List[Any] , lowerCamelCase__ :List[Any] ):
UpperCamelCase__ :Dict = self.get_value(lowerCamelCase__ )
return False if value is None else not bool(lowerCamelCase__ )
def __a ( self :Dict ):
return self._stage == 2
def __a ( self :List[str] ):
return self._stage == 3
def __a ( self :Optional[int] ):
return self._offload
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Union[str, Any] , lowerCamelCase__ :int ):
UpperCamelCase__ :Union[str, Any] = engine
def __a ( self :str , lowerCamelCase__ :List[str] , **lowerCamelCase__ :Optional[int] ):
# runs backpropagation and handles mixed precision
self.engine.backward(lowerCamelCase__ , **lowerCamelCase__ )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase__ :Tuple ):
super().__init__(lowerCamelCase__ , device_placement=lowerCamelCase__ , scaler=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = hasattr(self.optimizer , """overflow""" )
def __a ( self :int , lowerCamelCase__ :str=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def __a ( self :List[Any] ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def __a ( self :List[Any] ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :List[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple ):
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Dict=0.001 , lowerCamelCase__ :Optional[int]=0 , **lowerCamelCase__ :List[Any] ):
UpperCamelCase__ :Any = params
UpperCamelCase__ :Tuple = lr
UpperCamelCase__ :Optional[int] = weight_decay
UpperCamelCase__ :Optional[Any] = kwargs
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Any , lowerCamelCase__ :int , lowerCamelCase__ :int=None , lowerCamelCase__ :List[str]=0 , **lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Optional[Any] = optimizer
UpperCamelCase__ :List[Any] = total_num_steps
UpperCamelCase__ :Any = warmup_num_steps
UpperCamelCase__ :Any = kwargs | 45 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : complex ,a__ : str = "x" ,a__ : float = 10**-10 ,a__ : int = 1 ,) -> complex:
__A : Tuple = symbols(a__ )
__A : List[str] = lambdify(a__ ,a__ )
__A : Any = lambdify(a__ ,diff(a__ ,a__ ) )
__A : Dict = starting_point
while True:
if diff_function(a__ ) != 0:
__A : Optional[int] = prev_guess - multiplicity * func(a__ ) / diff_function(
a__ )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
__A : List[Any] = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 17 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class A_ :
lowerCAmelCase__ = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
lowerCAmelCase__ = field(
default='./' , metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
lowerCAmelCase__ = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
lowerCAmelCase__ = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCAmelCase__ = field(default=2 , metadata={'help': 'Batch size for training.'} )
lowerCAmelCase__ = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
lowerCAmelCase__ = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
lowerCAmelCase__ = field(
default=1_0_0_0_0 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
lowerCAmelCase__ = field(default=2E-4 , metadata={'help': 'Learning rate fo training.'} )
lowerCAmelCase__ = field(default='cosine' , metadata={'help': 'Learning rate.'} )
lowerCAmelCase__ = field(
default=7_5_0 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
lowerCAmelCase__ = field(
default=1_6 , metadata={'help': 'Number of gradient accumulation steps.'} )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
lowerCAmelCase__ = field(default=5_0_0_0_0 , metadata={'help': 'Maximum number of training steps.'} )
lowerCAmelCase__ = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCAmelCase__ = field(default=1_0_2_4 , metadata={'help': 'Sequence lengths used for training.'} )
lowerCAmelCase__ = field(default=1 , metadata={'help': 'Training seed.'} )
lowerCAmelCase__ = field(
default=1_0_2_4 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} , )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class A_ :
lowerCAmelCase__ = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCAmelCase__ = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCAmelCase__ = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
lowerCAmelCase__ = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCAmelCase__ = field(default=1_0_2_4 , metadata={'help': 'Length of sequences to be evaluated.'} )
lowerCAmelCase__ = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class A_ :
lowerCAmelCase__ = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'Number of workers used for code evaluation.'} )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} , )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'Sample from the language model\'s output distribution.'} )
lowerCAmelCase__ = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
lowerCAmelCase__ = field(default=2_5_6 , metadata={'help': 'Maximum number of newly generated tokens.'} )
lowerCAmelCase__ = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
lowerCAmelCase__ = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
lowerCAmelCase__ = field(default=1_0 , metadata={'help': 'Number of generations to run in parallel.'} )
lowerCAmelCase__ = field(
default=2_0_0 , metadata={'help': 'Number of completions to generate for each sample.'} )
lowerCAmelCase__ = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
lowerCAmelCase__ = field(
default='eval_results.json' , metadata={'help': 'Random seed used for evaluation.'} )
lowerCAmelCase__ = field(
default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
lowerCAmelCase__ = field(
default=-1 , metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} , )
@dataclass
class A_ :
lowerCAmelCase__ = field(
default=_a , metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} , )
lowerCAmelCase__ = field(
default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
lowerCAmelCase__ = field(
default='codeparrot-clean' , metadata={'help': 'Folder to save processed processed dataset.'} )
lowerCAmelCase__ = field(
default=1_0_0_0_0_0 , metadata={'help': 'Number of files to save per JSON output file.'} )
lowerCAmelCase__ = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCAmelCase__ = field(
default=1_0_0_0 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
lowerCAmelCase__ = field(
default=1_0_0 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
lowerCAmelCase__ = field(
default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
lowerCAmelCase__ = field(
default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
lowerCAmelCase__ = field(
default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
lowerCAmelCase__ = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
lowerCAmelCase__ = field(
default=_a , metadata={'help': 'If True, near-duplicate samples are removed.'} )
lowerCAmelCase__ = field(
default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class A_ :
lowerCAmelCase__ = field(
default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
lowerCAmelCase__ = field(
default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
lowerCAmelCase__ = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCAmelCase__ = field(default=2_0_0_0_0_0 , metadata={'help': 'Number of examples to train tokenizer on.'} )
lowerCAmelCase__ = field(
default=3_2_7_6_8 , metadata={'help': 'Number of examples to train the tokenizer on.'} )
lowerCAmelCase__ = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class A_ :
lowerCAmelCase__ = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
lowerCAmelCase__ = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
lowerCAmelCase__ = field(
default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'Number of workers used for code evaluation.'} )
@dataclass
class A_ :
lowerCAmelCase__ = field(
default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
lowerCAmelCase__ = field(
default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
lowerCAmelCase__ = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
lowerCAmelCase__ = field(default=_a , metadata={'help': 'Push saved tokenizer to the hub.'} ) | 46 |
from math import sqrt
def __SCREAMING_SNAKE_CASE ( a__ : int = 1000000 ) -> int:
__A : int = 0
__A : int = 0
__A : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(a__ ,sum_shortest_sides // 2 )
- max(1 ,sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
| 17 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''xlm-roberta-xl'''
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict=2_5_0_8_8_0 , SCREAMING_SNAKE_CASE__ : Any=2_5_6_0 , SCREAMING_SNAKE_CASE__ : List[Any]=3_6 , SCREAMING_SNAKE_CASE__ : Tuple=3_2 , SCREAMING_SNAKE_CASE__ : str=1_0_2_4_0 , SCREAMING_SNAKE_CASE__ : Dict="gelu" , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Dict=5_1_4 , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : Dict=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1e-05 , SCREAMING_SNAKE_CASE__ : int=1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : int="absolute" , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : int=None , **SCREAMING_SNAKE_CASE__ : List[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = vocab_size
__a : int = hidden_size
__a : Any = num_hidden_layers
__a : Optional[Any] = num_attention_heads
__a : Any = hidden_act
__a : int = intermediate_size
__a : int = hidden_dropout_prob
__a : Dict = attention_probs_dropout_prob
__a : int = max_position_embeddings
__a : Tuple = type_vocab_size
__a : Any = initializer_range
__a : Any = layer_norm_eps
__a : List[str] = position_embedding_type
__a : Dict = use_cache
__a : Dict = classifier_dropout
class _UpperCamelCase( __lowerCamelCase ):
@property
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__a : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__a : Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 47 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase_ ( _lowercase ):
_lowercase : List[str] = ['''pixel_values''']
def __init__( self : Dict , __A : bool = True , __A : Optional[Dict[str, int]] = None , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : bool = True , __A : Dict[str, int] = None , __A : bool = True , __A : Union[int, float] = 1 / 255 , __A : bool = True , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , **__A : int , ):
super().__init__(**__A )
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 256}
__A : Dict = get_size_dict(__A , default_to_square=__A )
__A : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : str = do_resize
__A : Dict = size
__A : Any = resample
__A : Optional[Any] = do_center_crop
__A : List[str] = crop_size
__A : Optional[int] = do_rescale
__A : int = rescale_factor
__A : Union[str, Any] = do_normalize
__A : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__A : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase_ ( self : Optional[Any] , __A : np.ndarray , __A : Dict[str, int] , __A : PILImageResampling = PILImageResampling.BICUBIC , __A : Optional[Union[str, ChannelDimension]] = None , **__A : List[Any] , ):
__A : str = get_size_dict(__A , default_to_square=__A )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__A : Dict = get_resize_output_image_size(__A , size=size["""shortest_edge"""] , default_to_square=__A )
return resize(__A , size=__A , resample=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : np.ndarray , __A : Dict[str, int] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : str , ):
__A : str = get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__A , size=(size["""height"""], size["""width"""]) , data_format=__A , **__A )
def lowerCAmelCase_ ( self : List[str] , __A : np.ndarray , __A : float , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Optional[int] ):
return rescale(__A , scale=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Any , __A : np.ndarray , __A : Union[float, List[float]] , __A : Union[float, List[float]] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Tuple , ):
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : int , __A : ImageInput , __A : Optional[bool] = None , __A : Dict[str, int] = None , __A : PILImageResampling = None , __A : bool = None , __A : Dict[str, int] = None , __A : Optional[bool] = None , __A : Optional[float] = None , __A : Optional[bool] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[str, TensorType]] = None , __A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__A : Optional[int] , ):
__A : List[str] = do_resize if do_resize is not None else self.do_resize
__A : Any = size if size is not None else self.size
__A : Union[str, Any] = get_size_dict(__A , default_to_square=__A )
__A : Tuple = resample if resample is not None else self.resample
__A : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
__A : List[Any] = crop_size if crop_size is not None else self.crop_size
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : Tuple = do_rescale if do_rescale is not None else self.do_rescale
__A : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__A : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
__A : Optional[int] = image_mean if image_mean is not None else self.image_mean
__A : List[str] = image_std if image_std is not None else self.image_std
__A : Union[str, Any] = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__A : Union[str, Any] = [to_numpy_array(__A ) for image in images]
if do_resize:
__A : int = [self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_center_crop:
__A : Optional[Any] = [self.center_crop(image=__A , size=__A ) for image in images]
if do_rescale:
__A : List[Any] = [self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
__A : Any = [self.normalize(image=__A , mean=__A , std=__A ) for image in images]
__A : int = [to_channel_dimension_format(__A , __A ) for image in images]
__A : Tuple = {"""pixel_values""": images}
return BatchFeature(data=__A , tensor_type=__A )
def lowerCAmelCase_ ( self : int , __A : List[str] , __A : List[Tuple] = None ):
__A : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__A ) != len(__A ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(__A ):
__A : str = target_sizes.numpy()
__A : int = []
for idx in range(len(__A ) ):
__A : Any = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__A )
__A : Union[str, Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__A )
else:
__A : List[str] = logits.argmax(dim=1 )
__A : Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 17 | 0 |
'''simple docstring'''
from __future__ import annotations
def A ( UpperCamelCase_ : int | float | str , UpperCamelCase_ : int | float | str ) -> list[str]:
'''simple docstring'''
if nth_term == "":
return [""]
lowerCAmelCase__ = int(UpperCamelCase_ )
lowerCAmelCase__ = int(UpperCamelCase_ )
lowerCAmelCase__ = []
for temp in range(int(UpperCamelCase_ ) ):
series.append(F"""1 / {pow(temp + 1 , int(UpperCamelCase_ ) )}""" if series else "1" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ : str = int(input("Enter the last number (nth term) of the P-Series"))
UpperCAmelCase__ : List[Any] = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 48 |
class lowerCamelCase_ :
def __init__( self : Dict , __A : Tuple , __A : Optional[int] , __A : int ):
__A : List[str] = name
__A : Optional[int] = value
__A : Optional[Any] = weight
def __repr__( self : Any ):
return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def lowerCAmelCase_ ( self : Union[str, Any] ):
return self.value
def lowerCAmelCase_ ( self : str ):
return self.name
def lowerCAmelCase_ ( self : str ):
return self.weight
def lowerCAmelCase_ ( self : Dict ):
return self.value / self.weight
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : Optional[int] ,a__ : Union[str, Any] ) -> int:
__A : Tuple = []
for i in range(len(a__ ) ):
menu.append(Things(name[i] ,value[i] ,weight[i] ) )
return menu
def __SCREAMING_SNAKE_CASE ( a__ : Tuple ,a__ : Any ,a__ : Optional[int] ) -> Tuple:
__A : Optional[int] = sorted(a__ ,key=a__ ,reverse=a__ )
__A : Optional[Any] = []
__A , __A : Tuple = 0.0, 0.0
for i in range(len(a__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_lowercase : Dict = False
try:
_lowercase : Union[str, Any] = _is_package_available('google.colab')
except ModuleNotFoundError:
pass
@input.register
class _UpperCAmelCase :
def __init__( self : Optional[int] , _lowercase : str = None , _lowercase : list = [] ):
__UpperCAmelCase = 0
__UpperCAmelCase = choices
__UpperCAmelCase = prompt
if sys.platform == "win32":
__UpperCAmelCase = '''*'''
else:
__UpperCAmelCase = '''➔ '''
def a ( self : Dict , _lowercase : Dict , _lowercase : str = "" ):
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , _lowercase )
else:
forceWrite(self.choices[index] , _lowercase )
def a ( self : List[str] , _lowercase : int ):
if index == self.position:
forceWrite(F''' {self.arrow_char} ''' )
self.write_choice(_lowercase )
else:
forceWrite(F''' {self.choices[index]}''' )
reset_cursor()
def a ( self : List[Any] , _lowercase : Direction , _lowercase : int = 1 ):
__UpperCAmelCase = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(_lowercase )
move_cursor(_lowercase , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['''up'''] )
def a ( self : Any ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP['''down'''] )
def a ( self : Any ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['''newline'''] )
def a ( self : str ):
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
return self.position
@input.mark(KEYMAP['''interrupt'''] )
def a ( self : Optional[Any] ):
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(_lowercase )] for number in range(10 )] )
def a ( self : Tuple ):
__UpperCAmelCase = int(chr(self.current_selection ) )
__UpperCAmelCase = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , _lowercase )
else:
return
else:
return
def a ( self : str , _lowercase : int = 0 ):
if self.prompt:
linebreak()
forceWrite(self.prompt , '''\n''' )
if in_colab:
forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' )
else:
forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' )
__UpperCAmelCase = default_choice
for i in range(len(self.choices ) ):
self.print_choice(_lowercase )
forceWrite('''\n''' )
move_cursor(len(self.choices ) - self.position , '''UP''' )
with cursor.hide():
while True:
if in_colab:
try:
__UpperCAmelCase = int(builtins.input() )
except ValueError:
__UpperCAmelCase = default_choice
else:
__UpperCAmelCase = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , '''UP''' )
clear_line()
self.write_choice(_lowercase , '''\n''' )
return choice
| 49 |
UpperCAmelCase_ : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_5818,
}
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ,a__ : float ) -> float:
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
__A : Optional[int] = (
f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
f"""Valid values are: {", ".join(a__ )}"""
)
raise ValueError(a__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'cvt'
def __init__( self ,_lowerCAmelCase=3 ,_lowerCAmelCase=[7, 3, 3] ,_lowerCAmelCase=[4, 2, 2] ,_lowerCAmelCase=[2, 1, 1] ,_lowerCAmelCase=[64, 1_92, 3_84] ,_lowerCAmelCase=[1, 3, 6] ,_lowerCAmelCase=[1, 2, 10] ,_lowerCAmelCase=[4.0, 4.0, 4.0] ,_lowerCAmelCase=[0.0, 0.0, 0.0] ,_lowerCAmelCase=[0.0, 0.0, 0.0] ,_lowerCAmelCase=[0.0, 0.0, 0.1] ,_lowerCAmelCase=[True, True, True] ,_lowerCAmelCase=[False, False, True] ,_lowerCAmelCase=["dw_bn", "dw_bn", "dw_bn"] ,_lowerCAmelCase=[3, 3, 3] ,_lowerCAmelCase=[1, 1, 1] ,_lowerCAmelCase=[2, 2, 2] ,_lowerCAmelCase=[1, 1, 1] ,_lowerCAmelCase=[1, 1, 1] ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-12 ,**_lowerCAmelCase ,):
super().__init__(**_lowerCAmelCase )
lowerCamelCase__ = num_channels
lowerCamelCase__ = patch_sizes
lowerCamelCase__ = patch_stride
lowerCamelCase__ = patch_padding
lowerCamelCase__ = embed_dim
lowerCamelCase__ = num_heads
lowerCamelCase__ = depth
lowerCamelCase__ = mlp_ratio
lowerCamelCase__ = attention_drop_rate
lowerCamelCase__ = drop_rate
lowerCamelCase__ = drop_path_rate
lowerCamelCase__ = qkv_bias
lowerCamelCase__ = cls_token
lowerCamelCase__ = qkv_projection_method
lowerCamelCase__ = kernel_qkv
lowerCamelCase__ = padding_kv
lowerCamelCase__ = stride_kv
lowerCamelCase__ = padding_q
lowerCamelCase__ = stride_q
lowerCamelCase__ = initializer_range
lowerCamelCase__ = layer_norm_eps
| 50 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ : Optional[Any] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 | 0 |
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def __snake_case ( SCREAMING_SNAKE_CASE_ : int ) -> int:
"""simple docstring"""
UpperCAmelCase = model.config
UpperCAmelCase = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
UpperCAmelCase = MBartConfig(
is_decoder=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , add_cross_attention=SCREAMING_SNAKE_CASE_ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=SCREAMING_SNAKE_CASE_ , add_final_layer_norm=SCREAMING_SNAKE_CASE_ , )
return encoder_config, decoder_config
def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if "encoder.model" in name:
UpperCAmelCase = name.replace('''encoder.model''' , '''encoder''' )
if "decoder.model" in name:
UpperCAmelCase = name.replace('''decoder.model''' , '''decoder''' )
if "patch_embed.proj" in name:
UpperCAmelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
UpperCAmelCase = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if name.startswith('''encoder''' ):
if "layers" in name:
UpperCAmelCase = '''encoder.''' + name
if "attn.proj" in name:
UpperCAmelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "mask" not in name:
UpperCAmelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
UpperCAmelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
UpperCAmelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
UpperCAmelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCAmelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
UpperCAmelCase = '''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
UpperCAmelCase = '''encoder.layernorm.bias'''
return name
def __snake_case ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCAmelCase = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "qkv" in key:
UpperCAmelCase = key.split('''.''' )
UpperCAmelCase = int(key_split[3] )
UpperCAmelCase = int(key_split[5] )
UpperCAmelCase = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCAmelCase = val[:dim, :]
UpperCAmelCase = val[dim : dim * 2, :]
UpperCAmelCase = val[-dim:, :]
else:
UpperCAmelCase = val[:dim]
UpperCAmelCase = val[dim : dim * 2]
UpperCAmelCase = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
UpperCAmelCase = val
return orig_state_dict
def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : int=False ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = DonutModel.from_pretrained(SCREAMING_SNAKE_CASE_ ).eval()
# load HuggingFace model
UpperCAmelCase, UpperCAmelCase = get_configs(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = DonutSwinModel(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = MBartForCausalLM(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = VisionEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase = original_model.state_dict()
UpperCAmelCase = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# verify results on scanned document
UpperCAmelCase = load_dataset('''hf-internal-testing/example-documents''' )
UpperCAmelCase = dataset['''test'''][0]['''image'''].convert('''RGB''' )
UpperCAmelCase = XLMRobertaTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE_ , from_slow=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
UpperCAmelCase = DonutProcessor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = processor(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
UpperCAmelCase = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
UpperCAmelCase = '''When is the coffee break?'''
UpperCAmelCase = task_prompt.replace('''{user_input}''' , SCREAMING_SNAKE_CASE_ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
UpperCAmelCase = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
UpperCAmelCase = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
UpperCAmelCase = '''s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
UpperCAmelCase = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
UpperCAmelCase = '''hello world'''
else:
raise ValueError('''Model name not supported''' )
UpperCAmelCase = original_model.decoder.tokenizer(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )[
'''input_ids'''
]
UpperCAmelCase = original_model.encoder.model.patch_embed(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase, UpperCAmelCase = model.encoder.embeddings(SCREAMING_SNAKE_CASE_ )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
# verify encoder hidden states
UpperCAmelCase = original_model.encoder(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = model.encoder(SCREAMING_SNAKE_CASE_ ).last_hidden_state
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
# verify decoder hidden states
UpperCAmelCase = original_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).logits
UpperCAmelCase = model(SCREAMING_SNAKE_CASE_ , decoder_input_ids=SCREAMING_SNAKE_CASE_ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
a__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
a__ : Optional[Any] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 51 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
UpperCAmelCase_ : Optional[Any] = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def __SCREAMING_SNAKE_CASE ( a__ : str=True ) -> List[Any]:
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_lowercase ) )
class lowerCamelCase_ ( _lowercase ):
_lowercase : Optional[int] = None
_lowercase : str = None
def lowerCAmelCase_ ( self : Dict , __A : Optional[int] , __A : Optional[Any] ):
with TemporaryDirectory() as tmp_dir:
__A : List[Any] = dataset_module_factory(__A , cache_dir=__A )
__A : Tuple = import_main_class(dataset_module.module_path , dataset=__A )
__A : DatasetBuilder = builder_cls(
cache_dir=__A , config_name=__A , hash=dataset_module.hash , )
__A : List[Any] = """/""".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=__A ).replace(os.sep , """/""" ),
config.DATASET_INFO_FILENAME,
] )
__A : Union[str, Any] = cached_path(__A , cache_dir=__A )
self.assertTrue(os.path.exists(__A ) )
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( a__ : Dict ) -> Optional[Any]:
__A : Optional[Any] = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
__A : Union[str, Any] = dataset_module_factory("""wikipedia""" ,cache_dir=a__ )
__A : List[Any] = import_main_class(dataset_module.module_path )
__A : DatasetBuilder = builder_cls(
cache_dir=a__ ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
__A : Any = None
builder_instance.download_and_prepare()
__A : Union[str, Any] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( a__ : List[str] ) -> List[str]:
__A : Tuple = dataset_module_factory("""wikipedia""" ,cache_dir=a__ )
__A : str = import_main_class(dataset_module.module_path ,dataset=a__ )
__A : DatasetBuilder = builder_cls(
cache_dir=a__ ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
__A : Optional[int] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(a__ ,a__ )
assert "train" in ds
assert isinstance(ds["""train"""] ,a__ )
assert next(iter(ds["""train"""] ) )
| 17 | 0 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''efficientformer'''
def __init__( self , _UpperCAmelCase = [3, 2, 6, 4] , _UpperCAmelCase = [48, 96, 224, 448] , _UpperCAmelCase = [True, True, True, True] , _UpperCAmelCase = 448 , _UpperCAmelCase = 32 , _UpperCAmelCase = 4 , _UpperCAmelCase = 7 , _UpperCAmelCase = 5 , _UpperCAmelCase = 8 , _UpperCAmelCase = 4 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = 16 , _UpperCAmelCase = 3 , _UpperCAmelCase = 3 , _UpperCAmelCase = 3 , _UpperCAmelCase = 2 , _UpperCAmelCase = 1 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = 1 , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = 1e-5 , _UpperCAmelCase = "gelu" , _UpperCAmelCase = 0.0_2 , _UpperCAmelCase = 1e-1_2 , _UpperCAmelCase = 224 , _UpperCAmelCase = 1e-0_5 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : Optional[int] = hidden_act
__a : List[str] = hidden_dropout_prob
__a : List[str] = hidden_sizes
__a : List[Any] = num_hidden_layers
__a : Union[str, Any] = num_attention_heads
__a : List[Any] = initializer_range
__a : Optional[int] = layer_norm_eps
__a : Union[str, Any] = patch_size
__a : Dict = num_channels
__a : Dict = depths
__a : Optional[int] = mlp_expansion_ratio
__a : Any = downsamples
__a : Any = dim
__a : Dict = key_dim
__a : Dict = attention_ratio
__a : Any = resolution
__a : str = pool_size
__a : List[Any] = downsample_patch_size
__a : Any = downsample_stride
__a : str = downsample_pad
__a : Union[str, Any] = drop_path_rate
__a : str = num_metaad_blocks
__a : List[Any] = distillation
__a : str = use_layer_scale
__a : Optional[int] = layer_scale_init_value
__a : Optional[Any] = image_size
__a : Optional[int] = batch_norm_eps | 52 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : Optional[int] , __A : Union[str, Any] , __A : int=7 , __A : int=3 , __A : int=30 , __A : Dict=400 , __A : str=True , __A : str=None , __A : str=True , __A : Optional[int]=[0.5, 0.5, 0.5] , __A : List[str]=[0.5, 0.5, 0.5] , __A : Optional[Any]=True , __A : int=1 / 255 , __A : List[Any]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
__A : Union[str, Any] = parent
__A : Union[str, Any] = batch_size
__A : Union[str, Any] = num_channels
__A : Optional[Any] = min_resolution
__A : Union[str, Any] = max_resolution
__A : Any = do_resize
__A : Union[str, Any] = size
__A : Optional[int] = do_normalize
__A : Dict = image_mean
__A : Optional[int] = image_std
__A : Tuple = do_rescale
__A : Optional[Any] = rescale_factor
__A : Tuple = do_pad
def lowerCAmelCase_ ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase_ ( self : Optional[Any] , __A : Optional[int] , __A : Dict=False ):
if not batched:
__A : Union[str, Any] = image_inputs[0]
if isinstance(__A , Image.Image ):
__A , __A : Union[str, Any] = image.size
else:
__A , __A : Optional[int] = image.shape[1], image.shape[2]
if w < h:
__A : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
__A : Dict = self.size["""shortest_edge"""]
elif w > h:
__A : Optional[Any] = self.size["""shortest_edge"""]
__A : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
__A : Union[str, Any] = self.size["""shortest_edge"""]
__A : str = self.size["""shortest_edge"""]
else:
__A : Any = []
for image in image_inputs:
__A , __A : List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__A : Tuple = max(__A , key=lambda __A : item[0] )[0]
__A : Union[str, Any] = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase_ ( _lowercase , unittest.TestCase ):
_lowercase : Tuple = DetaImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : Optional[Any] ):
__A : Tuple = DetaImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : List[str] ):
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , """image_mean""" ) )
self.assertTrue(hasattr(__A , """image_std""" ) )
self.assertTrue(hasattr(__A , """do_normalize""" ) )
self.assertTrue(hasattr(__A , """do_resize""" ) )
self.assertTrue(hasattr(__A , """do_rescale""" ) )
self.assertTrue(hasattr(__A , """do_pad""" ) )
self.assertTrue(hasattr(__A , """size""" ) )
def lowerCAmelCase_ ( self : Any ):
__A : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , __A )
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A , __A : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
__A : List[str] = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : int = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Dict ):
# Initialize image_processing
__A : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
__A : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Tuple = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Any = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase_ ( self : Tuple ):
# prepare image and target
__A : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__A : Any = json.loads(f.read() )
__A : int = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__A : List[str] = DetaImageProcessor()
__A : List[str] = image_processing(images=__A , annotations=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Tuple = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : List[str] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify orig_size
__A : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
# prepare image, target and masks_path
__A : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__A : Tuple = json.loads(f.read() )
__A : Optional[int] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__A : Any = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__A : Any = DetaImageProcessor(format="""coco_panoptic""" )
__A : int = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Optional[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : Any = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : Optional[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify masks
__A : List[str] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __A )
# verify orig_size
__A : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
| 17 | 0 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a_ ( lowerCAmelCase_ : Dict, lowerCAmelCase_ : Tuple ):
# Load checkpoint
__lowerCAmelCase = torch.load(lowerCAmelCase_, map_location='cpu' )
__lowerCAmelCase = chkpt['model']
# We have the base model one level deeper than the original XLM repository
__lowerCAmelCase = {}
for k, v in state_dict.items():
if "pred_layer" in k:
__lowerCAmelCase = v
else:
__lowerCAmelCase = v
__lowerCAmelCase = chkpt['params']
__lowerCAmelCase = {n: v for n, v in config.items() if not isinstance(lowerCAmelCase_, (torch.FloatTensor, numpy.ndarray) )}
__lowerCAmelCase = chkpt['dico_word2id']
__lowerCAmelCase = {s + '</w>' if s.find('@@' ) == -1 and i > 13 else s.replace('@@', '' ): i for s, i in vocab.items()}
# Save pytorch-model
__lowerCAmelCase = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
__lowerCAmelCase = pytorch_dump_folder_path + '/' + CONFIG_NAME
__lowerCAmelCase = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(lowerCAmelCase_, lowerCAmelCase_ )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(lowerCAmelCase_, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowerCAmelCase_, indent=2 ) + '\n' )
print(F"""Save vocab file to {pytorch_config_dump_path}""" )
with open(lowerCAmelCase_, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(lowerCAmelCase_, indent=2 ) + '\n' )
if __name__ == "__main__":
_snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_snake_case : Any = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 53 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
__A : List[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=a__ ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=a__ ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=a__ )
return parser.parse_args()
def __SCREAMING_SNAKE_CASE ( ) -> str:
__A : Union[str, Any] = parse_args()
# Import training_script as a module.
__A : List[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__A : str = script_fpath.stem
__A : int = importlib.import_module(a__ )
# Patch sys.argv
__A : List[str] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 17 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase : Tuple ={
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Dict =[
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__lowercase : List[Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 54 |
from collections.abc import Sequence
def __SCREAMING_SNAKE_CASE ( a__ : Sequence[float] ,a__ : float ) -> float:
return sum(c * (x**i) for i, c in enumerate(a__ ) )
def __SCREAMING_SNAKE_CASE ( a__ : Sequence[float] ,a__ : float ) -> float:
__A : Any = 0.0
for coeff in reversed(a__ ):
__A : List[str] = result * x + coeff
return result
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCAmelCase_ : str = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 17 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE :Optional[Any] = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :Tuple = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :List[str] = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :Optional[Any] = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
SCREAMING_SNAKE_CASE :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 55 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCamelCase_ ( _lowercase ):
_lowercase : Union[str, Any] = '''EncodecFeatureExtractor'''
_lowercase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self : List[Any] , __A : Any , __A : Tuple ):
super().__init__(__A , __A )
__A : Dict = self.feature_extractor
__A : List[str] = False
def lowerCAmelCase_ ( self : Union[str, Any] , __A : str=None , __A : Tuple=None , __A : Dict=True ):
return self.tokenizer.get_decoder_prompt_ids(task=__A , language=__A , no_timestamps=__A )
def __call__( self : Optional[Any] , *__A : Tuple , **__A : Tuple ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__A , **__A )
__A : str = kwargs.pop("""audio""" , __A )
__A : Optional[Any] = kwargs.pop("""sampling_rate""" , __A )
__A : int = kwargs.pop("""text""" , __A )
if len(__A ) > 0:
__A : int = args[0]
__A : Dict = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if text is not None:
__A : Dict = self.tokenizer(__A , **__A )
if audio is not None:
__A : Optional[int] = self.feature_extractor(__A , *__A , sampling_rate=__A , **__A )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__A : List[Any] = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
__A : int = audio_inputs["""padding_mask"""]
return inputs
def lowerCAmelCase_ ( self : List[str] , *__A : int , **__A : Tuple ):
__A : Optional[int] = kwargs.pop("""audio""" , __A )
__A : List[str] = kwargs.pop("""padding_mask""" , __A )
if len(__A ) > 0:
__A : Dict = args[0]
__A : Optional[int] = args[1:]
if audio_values is not None:
return self._decode_audio(__A , padding_mask=__A )
else:
return self.tokenizer.batch_decode(*__A , **__A )
def lowerCAmelCase_ ( self : Optional[Any] , *__A : Dict , **__A : Any ):
return self.tokenizer.decode(*__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : Union[str, Any] , __A : Optional = None ):
__A : List[str] = to_numpy(__A )
__A , __A , __A : Tuple = audio_values.shape
if padding_mask is None:
return list(__A )
__A : Union[str, Any] = to_numpy(__A )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__A : List[str] = seq_len - padding_mask.shape[-1]
__A : Tuple = 1 - self.feature_extractor.padding_value
__A : Optional[int] = np.pad(__A , ((0, 0), (0, difference)) , """constant""" , constant_values=__A )
__A : int = audio_values.tolist()
for i in range(__A ):
__A : str = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__A : List[Any] = sliced_audio.reshape(__A , -1 )
return audio_values
| 17 | 0 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
_a : Optional[Any] = 100
_a : Dict = set(range(3, NUM_PRIMES, 2))
primes.add(2)
_a : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_0_0 )
def _a (lowercase__ : int ) -> set[int]:
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
__snake_case = set()
__snake_case = 42
__snake_case = 42
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def _a (lowercase__ : int = 5_0_0_0 ) -> int | None:
"""simple docstring"""
for number_to_partition in range(1 , lowercase__ ):
if len(partition(lowercase__ ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f'''{solution() = }''')
| 56 |
def __SCREAMING_SNAKE_CASE ( a__ : int ) -> int:
if not isinstance(a__ ,a__ ):
raise TypeError("""Input value must be an 'int' type""" )
__A : Union[str, Any] = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
UpperCamelCase_: Union[str, Any] = 'ylacombe/bark-small'
UpperCamelCase_: Optional[int] = tempfile.mkdtemp()
UpperCamelCase_: Dict = 'en_speaker_1'
UpperCamelCase_: List[Any] = 'This is a test string'
UpperCamelCase_: Tuple = 'speaker_embeddings_path.json'
UpperCamelCase_: Tuple = 'speaker_embeddings'
def _a ( self , **_lowerCamelCase ):
return AutoTokenizer.from_pretrained(self.checkpoint , **_lowerCamelCase )
def _a ( self ):
shutil.rmtree(self.tmpdirname )
def _a ( self ):
UpperCamelCase_: int = self.get_tokenizer()
UpperCamelCase_: Dict = BarkProcessor(tokenizer=_lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Optional[Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def _a ( self ):
UpperCamelCase_: Dict = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
UpperCamelCase_: Optional[int] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
UpperCamelCase_: Optional[int] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def _a ( self ):
UpperCamelCase_: Union[str, Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
UpperCamelCase_: int = 3_5
UpperCamelCase_: Optional[int] = 2
UpperCamelCase_: int = 8
UpperCamelCase_: Union[str, Any] = {
'semantic_prompt': np.ones(_lowerCamelCase ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
UpperCamelCase_: Dict = processor(text=self.input_string , voice_preset=_lowerCamelCase )
UpperCamelCase_: str = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCamelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
UpperCamelCase_: Tuple = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(_lowerCamelCase , **_lowerCamelCase )
UpperCamelCase_: List[Any] = processor(text=self.input_string , voice_preset=_lowerCamelCase )
UpperCamelCase_: Tuple = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCamelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
UpperCamelCase_: int = processor(text=self.input_string , voice_preset=self.voice_preset )
def _a ( self ):
UpperCamelCase_: Tuple = self.get_tokenizer()
UpperCamelCase_: str = BarkProcessor(tokenizer=_lowerCamelCase )
UpperCamelCase_: List[str] = processor(text=self.input_string )
UpperCamelCase_: List[Any] = tokenizer(
self.input_string , padding='max_length' , max_length=2_5_6 , add_special_tokens=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() ) | 57 |
UpperCAmelCase_ : dict[tuple[int, int, int], int] = {}
def __SCREAMING_SNAKE_CASE ( a__ : int ,a__ : int ,a__ : int ) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
__A : List[Any] = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
__A : Dict = _calculate(days - 1 ,a__ ,late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
__A : List[str] = _calculate(days - 1 ,absent + 1 ,0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
__A : int = _calculate(days - 1 ,a__ ,0 )
__A : Optional[int] = state_late + state_absent + state_ontime
__A : Tuple = prizestrings
return prizestrings
def __SCREAMING_SNAKE_CASE ( a__ : int = 30 ) -> int:
return _calculate(a__ ,absent=0 ,late=0 )
if __name__ == "__main__":
print(solution())
| 17 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=7 , _lowercase=3 , _lowercase=1_8 , _lowercase=3_0 , _lowercase=4_0_0 , _lowercase=True , _lowercase=None , _lowercase=True , _lowercase=None , _lowercase=True , _lowercase=[0.5, 0.5, 0.5] , _lowercase=[0.5, 0.5, 0.5] , ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = size if size is not None else {"""shortest_edge""": 1_8}
snake_case_ : str = crop_size if crop_size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case_ : List[str] = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : List[Any] = num_channels
snake_case_ : Any = image_size
snake_case_ : int = min_resolution
snake_case_ : Tuple = max_resolution
snake_case_ : Tuple = do_resize
snake_case_ : int = size
snake_case_ : Union[str, Any] = do_center_crop
snake_case_ : Union[str, Any] = crop_size
snake_case_ : Optional[int] = do_normalize
snake_case_ : List[str] = image_mean
snake_case_ : Optional[Any] = image_std
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = LevitImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = LevitImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """image_mean""" ) )
self.assertTrue(hasattr(_lowercase , """image_std""" ) )
self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowercase , """do_resize""" ) )
self.assertTrue(hasattr(_lowercase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowercase , """size""" ) )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 1_8} )
self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} )
snake_case_ : int = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} )
self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
snake_case_ : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ : str = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
snake_case_ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ : Dict = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
snake_case_ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ : int = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 58 |
class lowerCamelCase_ :
def __init__( self : Dict , __A : int , __A : Tuple , __A : List[Any] ):
__A : Optional[int] = None
__A : Any = None
__A : int = graph
self._normalize_graph(__A , __A )
__A : str = len(__A )
__A : Optional[int] = None
def lowerCAmelCase_ ( self : int , __A : Any , __A : Optional[Any] ):
if sources is int:
__A : Dict = [sources]
if sinks is int:
__A : Optional[int] = [sinks]
if len(__A ) == 0 or len(__A ) == 0:
return
__A : str = sources[0]
__A : Dict = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(__A ) > 1 or len(__A ) > 1:
__A : Optional[Any] = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
__A : List[Any] = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
__A : str = max_input_flow
__A : Union[str, Any] = 0
__A : Any = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
__A : int = max_input_flow
__A : Optional[Any] = size - 1
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def lowerCAmelCase_ ( self : Optional[Any] , __A : Dict ):
__A : Dict = algorithm(self )
class lowerCamelCase_ :
def __init__( self : Union[str, Any] , __A : str ):
__A : Any = flow_network
__A : int = flow_network.verticesCount
__A : List[Any] = flow_network.sourceIndex
__A : Union[str, Any] = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
__A : Optional[int] = flow_network.graph
__A : str = False
def lowerCAmelCase_ ( self : List[Any] ):
if not self.executed:
self._algorithm()
__A : Any = True
def lowerCAmelCase_ ( self : List[str] ):
pass
class lowerCamelCase_ ( _lowercase ):
def __init__( self : Any , __A : List[str] ):
super().__init__(__A )
# use this to save your result
__A : str = -1
def lowerCAmelCase_ ( self : Any ):
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class lowerCamelCase_ ( _lowercase ):
def __init__( self : List[Any] , __A : Dict ):
super().__init__(__A )
__A : Tuple = [[0] * self.verticies_count for i in range(self.verticies_count )]
__A : Optional[Any] = [0] * self.verticies_count
__A : Union[str, Any] = [0] * self.verticies_count
def lowerCAmelCase_ ( self : int ):
__A : Optional[int] = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
__A : List[str] = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
__A : Dict = 0
while i < len(__A ):
__A : List[Any] = vertices_list[i]
__A : Optional[Any] = self.heights[vertex_index]
self.process_vertex(__A )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(__A ) )
__A : Any = 0
else:
i += 1
__A : Optional[int] = sum(self.preflow[self.source_index] )
def lowerCAmelCase_ ( self : Optional[Any] , __A : str ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__A , __A )
self.relabel(__A )
def lowerCAmelCase_ ( self : Dict , __A : List[str] , __A : Optional[Any] ):
__A : Union[str, Any] = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def lowerCAmelCase_ ( self : Optional[Any] , __A : Tuple ):
__A : Tuple = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
__A : Dict = self.heights[to_index]
if min_height is not None:
__A : Optional[int] = min_height + 1
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = [0]
UpperCAmelCase_ : Dict = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
UpperCAmelCase_ : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
UpperCAmelCase_ : str = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
UpperCAmelCase_ : int = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 17 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["image_processor", "tokenizer"]
lowercase_ = "CLIPImageProcessor"
lowercase_ = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
def __init__(self : List[Any] , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : List[str]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCAmelCase_ , )
lowerCamelCase__: int =kwargs.pop("feature_extractor")
lowerCamelCase__: int =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(UpperCAmelCase_ , UpperCAmelCase_)
def __call__(self : List[Any] , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=None , **UpperCAmelCase_ : Any) ->Union[str, Any]:
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none.")
if text is not None:
lowerCamelCase__: List[Any] =self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_)
if images is not None:
lowerCamelCase__: int =self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_)
if text is not None and images is not None:
lowerCamelCase__: str =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase_) , tensor_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[str] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any]) ->Dict:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Any) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_)
@property
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =self.tokenizer.model_input_names
lowerCamelCase__: str =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 59 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ) -> str | Literal[False]:
__A : Tuple = list(a__ )
__A : Optional[int] = list(a__ )
__A : int = 0
for i in range(len(a__ ) ):
if lista[i] != lista[i]:
count += 1
__A : int = """_"""
if count > 1:
return False
else:
return "".join(a__ )
def __SCREAMING_SNAKE_CASE ( a__ : list[str] ) -> list[str]:
__A : Optional[Any] = []
while True:
__A : Tuple = ["""$"""] * len(a__ )
__A : Union[str, Any] = []
for i in range(len(a__ ) ):
for j in range(i + 1 ,len(a__ ) ):
__A : int = compare_string(binary[i] ,binary[j] )
if k is False:
__A : List[str] = """*"""
__A : Any = """*"""
temp.append("""X""" )
for i in range(len(a__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(a__ ) == 0:
return pi
__A : Optional[Any] = list(set(a__ ) )
def __SCREAMING_SNAKE_CASE ( a__ : int ,a__ : Sequence[float] ) -> list[str]:
__A : List[str] = []
for minterm in minterms:
__A : List[Any] = """"""
for _ in range(a__ ):
__A : Union[str, Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(a__ )
return temp
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ,a__ : int ) -> bool:
__A : Optional[Any] = list(a__ )
__A : Tuple = list(a__ )
__A : Any = 0
for i in range(len(a__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def __SCREAMING_SNAKE_CASE ( a__ : list[list[int]] ,a__ : list[str] ) -> list[str]:
__A : Optional[int] = []
__A : Tuple = [0] * len(a__ )
for i in range(len(chart[0] ) ):
__A : str = 0
__A : Any = -1
for j in range(len(a__ ) ):
if chart[j][i] == 1:
count += 1
__A : Optional[Any] = j
if count == 1:
__A : int = 1
for i in range(len(a__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(a__ ) ):
__A : List[str] = 0
temp.append(prime_implicants[i] )
while True:
__A : Optional[Any] = 0
__A : Any = -1
__A : int = 0
for i in range(len(a__ ) ):
__A : List[Any] = chart[i].count(1 )
if count_n > max_n:
__A : Dict = count_n
__A : Tuple = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(a__ ) ):
__A : Union[str, Any] = 0
def __SCREAMING_SNAKE_CASE ( a__ : list[str] ,a__ : list[str] ) -> list[list[int]]:
__A : Any = [[0 for x in range(len(a__ ) )] for x in range(len(a__ ) )]
for i in range(len(a__ ) ):
__A : List[Any] = prime_implicants[i].count("""_""" )
for j in range(len(a__ ) ):
if is_for_table(prime_implicants[i] ,binary[j] ,a__ ):
__A : Union[str, Any] = 1
return chart
def __SCREAMING_SNAKE_CASE ( ) -> None:
__A : Any = int(input("""Enter the no. of variables\n""" ) )
__A : List[str] = [
float(a__ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
__A : Dict = decimal_to_binary(a__ ,a__ )
__A : Union[str, Any] = check(a__ )
print("""Prime Implicants are:""" )
print(a__ )
__A : Optional[Any] = prime_implicant_chart(a__ ,a__ )
__A : Any = selection(a__ ,a__ )
print("""Essential Prime Implicants are:""" )
print(a__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 17 | 0 |
import os
from pathlib import Path
def lowerCamelCase_ ( ) -> Tuple:
"""simple docstring"""
from torch.utils.cpp_extension import load
snake_case_ : List[str] = Path(_UpperCamelCase ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
snake_case_ : Any = [
root / filename
for filename in [
'''vision.cpp''',
os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
]
]
load(
'''MultiScaleDeformableAttention''' , _UpperCamelCase , with_cuda=_UpperCamelCase , extra_include_paths=[str(_UpperCamelCase )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
'''-DCUDA_HAS_FP16=1''',
'''-D__CUDA_NO_HALF_OPERATORS__''',
'''-D__CUDA_NO_HALF_CONVERSIONS__''',
'''-D__CUDA_NO_HALF2_OPERATORS__''',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 60 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( a__ : List[str] ,a__ : Dict ,a__ : Union[str, Any] ,a__ : Any ) -> Optional[int]: # noqa: E741
while r - l > 1:
__A : Any = (l + r) // 2
if v[m] >= key:
__A : Optional[int] = m
else:
__A : List[Any] = m # noqa: E741
return r
def __SCREAMING_SNAKE_CASE ( a__ : list[int] ) -> int:
if len(a__ ) == 0:
return 0
__A : str = [0] * len(a__ )
__A : List[str] = 1
__A : List[Any] = v[0]
for i in range(1 ,len(a__ ) ):
if v[i] < tail[0]:
__A : int = v[i]
elif v[i] > tail[length - 1]:
__A : Union[str, Any] = v[i]
length += 1
else:
__A : Any = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCamelCase = logging.get_logger(__name__)
# TODO: upload to AWS
UpperCamelCase = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = "retribert"
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any]=30_522 , SCREAMING_SNAKE_CASE__ : Optional[Any]=768 , SCREAMING_SNAKE_CASE__ : Optional[int]=8 , SCREAMING_SNAKE_CASE__ : List[Any]=12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_072 , SCREAMING_SNAKE_CASE__ : int="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=512 , SCREAMING_SNAKE_CASE__ : Optional[Any]=2 , SCREAMING_SNAKE_CASE__ : Tuple=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-1_2 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : str=128 , SCREAMING_SNAKE_CASE__ : str=0 , **SCREAMING_SNAKE_CASE__ : List[Any] , ) -> Tuple:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = share_encoders
lowerCAmelCase__ = projection_dim
| 61 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 17 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
snake_case = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
snake_case = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = torch.load(lowercase , map_location="cpu" )
return sd
def lowerCamelCase__ ( lowercase , lowercase , lowercase=rename_keys_prefix ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = OrderedDict()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
SCREAMING_SNAKE_CASE : Union[str, Any] = key
for name_pair in rename_keys_prefix:
SCREAMING_SNAKE_CASE : Any = new_key.replace(name_pair[0] , name_pair[1] )
SCREAMING_SNAKE_CASE : Dict = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
SCREAMING_SNAKE_CASE : Union[str, Any] = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
SCREAMING_SNAKE_CASE : List[Any] = "pretraining"
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE : int = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE : List[Any] = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE : Any = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE : str = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE : str = {"visual_embedding_dim": 512}
SCREAMING_SNAKE_CASE : Tuple = "multichoice"
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE : List[str] = {"visual_embedding_dim": 2048}
SCREAMING_SNAKE_CASE : str = "vqa_advanced"
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE : Tuple = {"visual_embedding_dim": 2048, "num_labels": 3129}
SCREAMING_SNAKE_CASE : Optional[Any] = "vqa"
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE : int = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
SCREAMING_SNAKE_CASE : Tuple = "nlvr"
SCREAMING_SNAKE_CASE : List[Any] = VisualBertConfig(**lowercase )
# Load State Dict
SCREAMING_SNAKE_CASE : List[str] = load_state_dict(lowercase )
SCREAMING_SNAKE_CASE : Any = get_new_dict(lowercase , lowercase )
if model_type == "pretraining":
SCREAMING_SNAKE_CASE : int = VisualBertForPreTraining(lowercase )
elif model_type == "vqa":
SCREAMING_SNAKE_CASE : Tuple = VisualBertForQuestionAnswering(lowercase )
elif model_type == "nlvr":
SCREAMING_SNAKE_CASE : List[Any] = VisualBertForVisualReasoning(lowercase )
elif model_type == "multichoice":
SCREAMING_SNAKE_CASE : Optional[Any] = VisualBertForMultipleChoice(lowercase )
model.load_state_dict(lowercase )
# Save Checkpoints
Path(lowercase ).mkdir(exist_ok=lowercase )
model.save_pretrained(lowercase )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
snake_case = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 62 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCamelCase_ ( _lowercase ):
_lowercase : Dict = '''unispeech'''
def __init__( self : str , __A : Tuple=32 , __A : List[str]=768 , __A : Dict=12 , __A : Union[str, Any]=12 , __A : Tuple=3072 , __A : Any="gelu" , __A : int=0.1 , __A : Optional[int]=0.1 , __A : List[Any]=0.1 , __A : Any=0.0 , __A : List[str]=0.0 , __A : int=0.1 , __A : List[Any]=0.1 , __A : List[str]=0.0_2 , __A : List[str]=1e-5 , __A : List[Any]="group" , __A : int="gelu" , __A : Any=(512, 512, 512, 512, 512, 512, 512) , __A : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , __A : Tuple=(10, 3, 3, 3, 3, 2, 2) , __A : Optional[int]=False , __A : Any=128 , __A : Union[str, Any]=16 , __A : Optional[Any]=False , __A : str=True , __A : Dict=0.0_5 , __A : Optional[Any]=10 , __A : Dict=2 , __A : int=0.0 , __A : List[str]=10 , __A : str=0 , __A : List[str]=320 , __A : List[Any]=2 , __A : Tuple=0.1 , __A : Optional[int]=100 , __A : Any=256 , __A : Dict=256 , __A : Tuple=0.1 , __A : List[str]="mean" , __A : int=False , __A : List[str]=False , __A : List[Any]=256 , __A : str=80 , __A : Tuple=0 , __A : Tuple=1 , __A : int=2 , __A : Dict=0.5 , **__A : List[Any] , ):
super().__init__(**__A , pad_token_id=__A , bos_token_id=__A , eos_token_id=__A )
__A : Dict = hidden_size
__A : Optional[Any] = feat_extract_norm
__A : List[Any] = feat_extract_activation
__A : str = list(__A )
__A : Optional[Any] = list(__A )
__A : Optional[int] = list(__A )
__A : List[Any] = conv_bias
__A : Optional[int] = num_conv_pos_embeddings
__A : List[Any] = num_conv_pos_embedding_groups
__A : int = len(self.conv_dim )
__A : Optional[Any] = num_hidden_layers
__A : List[str] = intermediate_size
__A : Union[str, Any] = hidden_act
__A : Optional[int] = num_attention_heads
__A : Tuple = hidden_dropout
__A : Optional[Any] = attention_dropout
__A : Union[str, Any] = activation_dropout
__A : Dict = feat_proj_dropout
__A : Optional[int] = final_dropout
__A : Dict = layerdrop
__A : Optional[int] = layer_norm_eps
__A : Optional[Any] = initializer_range
__A : Optional[int] = num_ctc_classes
__A : Dict = vocab_size
__A : List[str] = do_stable_layer_norm
__A : Tuple = use_weighted_layer_sum
__A : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__A : Tuple = apply_spec_augment
__A : Union[str, Any] = mask_time_prob
__A : Optional[Any] = mask_time_length
__A : List[Any] = mask_time_min_masks
__A : List[Any] = mask_feature_prob
__A : Any = mask_feature_length
__A : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__A : Any = num_codevectors_per_group
__A : Tuple = num_codevector_groups
__A : List[str] = contrastive_logits_temperature
__A : Optional[int] = feat_quantizer_dropout
__A : int = num_negatives
__A : List[str] = codevector_dim
__A : int = proj_codevector_dim
__A : Union[str, Any] = diversity_loss_weight
# ctc loss
__A : List[str] = ctc_loss_reduction
__A : Any = ctc_zero_infinity
# pretraining loss
__A : Union[str, Any] = replace_prob
@property
def lowerCAmelCase_ ( self : int ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 17 | 0 |
from collections.abc import Sequence
def lowerCamelCase__ ( __lowerCamelCase : Sequence[float] , __lowerCamelCase : bool = False ):
if not arr:
return 0
__UpperCAmelCase : Optional[int] = 0 if allow_empty_subarrays else float("""-inf""" )
__UpperCAmelCase : Optional[int] = 0.0
for num in arr:
__UpperCAmelCase : str = max(0 if allow_empty_subarrays else num , curr_sum + num )
__UpperCAmelCase : Dict = max(__lowerCamelCase , __lowerCamelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
a : Dict = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"""{max_subarray_sum(nums) = }""")
| 63 |
import fire
from utils import calculate_rouge, save_json
def __SCREAMING_SNAKE_CASE ( a__ : Any ,a__ : Tuple ,a__ : Any=None ,**a__ : Dict ) -> Optional[Any]:
__A : int = [x.strip() for x in open(a__ ).readlines()]
__A : List[str] = [x.strip() for x in open(a__ ).readlines()][: len(a__ )]
__A : List[Any] = calculate_rouge(a__ ,a__ ,**a__ )
if save_path is not None:
save_json(a__ ,a__ ,indent=a__ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 17 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Optional[int] = logging.get_logger(__name__)
lowercase_ : Optional[Any] = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class _lowerCamelCase ( UpperCamelCase_ ):
__a = "vit_mae"
def __init__( self , lowerCAmelCase=768 , lowerCAmelCase=12 , lowerCAmelCase=12 , lowerCAmelCase=3072 , lowerCAmelCase="gelu" , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=1e-12 , lowerCAmelCase=224 , lowerCAmelCase=16 , lowerCAmelCase=3 , lowerCAmelCase=True , lowerCAmelCase=16 , lowerCAmelCase=512 , lowerCAmelCase=8 , lowerCAmelCase=2048 , lowerCAmelCase=0.75 , lowerCAmelCase=False , **lowerCAmelCase , ) -> Optional[int]:
super().__init__(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Union[str, Any]= hidden_size
SCREAMING_SNAKE_CASE__: List[str]= num_hidden_layers
SCREAMING_SNAKE_CASE__: Dict= num_attention_heads
SCREAMING_SNAKE_CASE__: Any= intermediate_size
SCREAMING_SNAKE_CASE__: Tuple= hidden_act
SCREAMING_SNAKE_CASE__: Optional[int]= hidden_dropout_prob
SCREAMING_SNAKE_CASE__: Dict= attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__: Dict= initializer_range
SCREAMING_SNAKE_CASE__: List[Any]= layer_norm_eps
SCREAMING_SNAKE_CASE__: int= image_size
SCREAMING_SNAKE_CASE__: str= patch_size
SCREAMING_SNAKE_CASE__: str= num_channels
SCREAMING_SNAKE_CASE__: Tuple= qkv_bias
SCREAMING_SNAKE_CASE__: Union[str, Any]= decoder_num_attention_heads
SCREAMING_SNAKE_CASE__: Tuple= decoder_hidden_size
SCREAMING_SNAKE_CASE__: List[Any]= decoder_num_hidden_layers
SCREAMING_SNAKE_CASE__: Union[str, Any]= decoder_intermediate_size
SCREAMING_SNAKE_CASE__: List[Any]= mask_ratio
SCREAMING_SNAKE_CASE__: List[str]= norm_pix_loss
| 64 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( a__ : Optional[Any] ,a__ : Union[str, Any] ,a__ : Optional[int] ) -> List[Any]:
# Initialise PyTorch model
__A : Dict = MobileBertConfig.from_json_file(a__ )
print(f"""Building PyTorch model from configuration: {config}""" )
__A : Tuple = MobileBertForPreTraining(a__ )
# Load weights from tf checkpoint
__A : Dict = load_tf_weights_in_mobilebert(a__ ,a__ ,a__ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() ,a__ )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 17 | 0 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class __lowercase ( unittest.TestCase ):
def __lowercase ( self : str ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() ,encoding="""utf-8""" ,check=A ,)
assert hasattr(self ,"""env""" )
def __lowercase ( self : Any ,A : Any ):
'''simple docstring'''
# configuration for running training on smdistributed Model Parallel
UpperCAmelCase__ : Tuple = {
"""enabled""": True,
"""processes_per_host""": 8,
}
UpperCAmelCase__ : str = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
UpperCAmelCase__ : Optional[int] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
UpperCAmelCase__ : Union[str, Any] = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}" ,instance_count=A ,instance_type=self.instance_type ,debugger_hook_config=A ,hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} ,metric_definitions=self.env.metric_definitions ,distribution=A ,py_version="""py36""" ,)
def __lowercase ( self : Dict ,A : Tuple ):
'''simple docstring'''
TrainingJobAnalytics(A ).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv" )
@parameterized.expand([(1,)] )
def __lowercase ( self : Tuple ,A : Union[str, Any] ):
'''simple docstring'''
# create estimator
UpperCAmelCase__ : Optional[int] = self.create_estimator(A )
# run training
estimator.fit()
# result dataframe
UpperCAmelCase__ : List[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase__ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
UpperCAmelCase__ : List[str] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase__ : Any = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" ,999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"{estimator.latest_training_job.name}.json" ,"""w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} ,A )
| 65 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : int = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 | 0 |
import numpy as np
UpperCamelCase = [
["a", "b", "c", "d", "e"],
["f", "g", "h", "i", "k"],
["l", "m", "n", "o", "p"],
["q", "r", "s", "t", "u"],
["v", "w", "x", "y", "z"],
]
class lowerCAmelCase_ :
def __init__( self ):
_lowercase : Any = np.array(_lowerCAmelCase )
def __a ( self , _lowerCAmelCase ):
_lowercase , _lowercase : List[str] = np.where(letter == self.SQUARE )
_lowercase : Optional[int] = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def __a ( self , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : int = self.SQUARE[indexa - 1, indexa - 1]
return letter
def __a ( self , _lowerCAmelCase ):
_lowercase : int = message.lower()
_lowercase : Optional[Any] = message.replace(' ' , '' )
_lowercase : Optional[Any] = message.replace('j' , 'i' )
_lowercase : Optional[int] = np.empty((2, len(_lowerCAmelCase )) )
for letter_index in range(len(_lowerCAmelCase ) ):
_lowercase : Any = self.letter_to_numbers(message[letter_index] )
_lowercase : Tuple = numbers[0]
_lowercase : int = numbers[1]
_lowercase : int = first_step.reshape(2 * len(_lowerCAmelCase ) )
_lowercase : Tuple = ''
for numbers_index in range(len(_lowerCAmelCase ) ):
_lowercase : int = int(second_step[numbers_index * 2] )
_lowercase : List[Any] = int(second_step[(numbers_index * 2) + 1] )
_lowercase : Optional[int] = self.numbers_to_letter(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : List[Any] = encoded_message + letter
return encoded_message
def __a ( self , _lowerCAmelCase ):
_lowercase : Tuple = message.lower()
message.replace(' ' , '' )
_lowercase : Optional[Any] = np.empty(2 * len(_lowerCAmelCase ) )
for letter_index in range(len(_lowerCAmelCase ) ):
_lowercase : List[Any] = self.letter_to_numbers(message[letter_index] )
_lowercase : int = numbers[0]
_lowercase : Union[str, Any] = numbers[1]
_lowercase : Optional[int] = first_step.reshape((2, len(_lowerCAmelCase )) )
_lowercase : Tuple = ''
for numbers_index in range(len(_lowerCAmelCase ) ):
_lowercase : Tuple = int(second_step[0, numbers_index] )
_lowercase : Dict = int(second_step[1, numbers_index] )
_lowercase : List[Any] = self.numbers_to_letter(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Dict = decoded_message + letter
return decoded_message
| 66 |
import math
class lowerCamelCase_ :
def __init__( self : Union[str, Any] , __A : List[str]=0 ): # a graph with Node 0,1,...,N-1
__A : List[str] = n
__A : List[str] = [
[math.inf for j in range(0 , __A )] for i in range(0 , __A )
] # adjacency matrix for weight
__A : str = [
[math.inf for j in range(0 , __A )] for i in range(0 , __A )
] # dp[i][j] stores minimum distance from i to j
def lowerCAmelCase_ ( self : str , __A : Union[str, Any] , __A : Any , __A : Optional[int] ):
__A : List[Any] = w
def lowerCAmelCase_ ( self : Union[str, Any] ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
__A : List[Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def lowerCAmelCase_ ( self : int , __A : List[str] , __A : List[str] ):
return self.dp[u][v]
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 17 | 0 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[Any] , snake_case__ :List[Any] , snake_case__ :Any=[] ) -> Any:
_lowercase = size[0] - overlap_pixels * 2
_lowercase = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
_lowercase = np.ones((size_y, size_x) , dtype=np.uinta ) * 255
_lowercase = np.pad(snake_case__ , mode='linear_ramp' , pad_width=snake_case__ , end_values=0 )
if "l" in remove_borders:
_lowercase = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
_lowercase = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
_lowercase = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
_lowercase = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[Any] , snake_case__ :List[str] , snake_case__ :List[str] ) -> Dict:
return max(snake_case__ , min(snake_case__ , snake_case__ ) )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :[int] , snake_case__ :[int] , snake_case__ :[int] ) -> Optional[Any]:
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def SCREAMING_SNAKE_CASE__ ( snake_case__ :[int] , snake_case__ :int , snake_case__ :[int] ) -> Dict:
_lowercase = list(snake_case__ )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
_lowercase = clamp_rect(snake_case__ , [0, 0] , [image_size[0], image_size[1]] )
return rect
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Tuple , snake_case__ :int , snake_case__ :Dict , snake_case__ :str ) -> Tuple:
_lowercase = Image.new('RGB' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(snake_case__ , (original_slice, 0) )
return result
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Tuple , snake_case__ :Tuple ) -> List[str]:
_lowercase = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
_lowercase = tile.crop(snake_case__ )
return tile
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[str] , snake_case__ :Optional[int] ) -> Tuple:
_lowercase = n % d
return n - divisor
class A_ ( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : str ,__A : AutoencoderKL ,__A : CLIPTextModel ,__A : CLIPTokenizer ,__A : UNetaDConditionModel ,__A : DDPMScheduler ,__A : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] ,__A : int = 350 ,) -> Union[str, Any]:
super().__init__(
vae=__A ,text_encoder=__A ,tokenizer=__A ,unet=__A ,low_res_scheduler=__A ,scheduler=__A ,max_noise_level=__A ,)
def __UpperCAmelCase ( self : Dict ,__A : Dict ,__A : Tuple ,__A : List[Any] ,__A : str ,__A : Optional[Any] ,__A : Union[str, Any] ,__A : Optional[int] ,**__A : List[Any] ) -> int:
torch.manual_seed(0 )
_lowercase = (
min(image.size[0] - (tile_size + original_image_slice) ,x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) ,y * tile_size ),
min(image.size[0] ,(x + 1) * tile_size ),
min(image.size[1] ,(y + 1) * tile_size ),
)
_lowercase = add_overlap_rect(__A ,__A ,image.size )
_lowercase = image.crop(__A )
_lowercase = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
_lowercase = translated_slice_x - (original_image_slice / 2)
_lowercase = max(0 ,__A )
_lowercase = squeeze_tile(__A ,__A ,__A ,__A )
_lowercase = to_input.size
_lowercase = to_input.resize((tile_size, tile_size) ,Image.BICUBIC )
_lowercase = super(__A ,self ).__call__(image=__A ,**__A ).images[0]
_lowercase = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) ,Image.BICUBIC )
_lowercase = unsqueeze_tile(__A ,__A )
_lowercase = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) ,Image.BICUBIC )
_lowercase = []
if x == 0:
remove_borders.append('l' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('r' )
if y == 0:
remove_borders.append('t' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('b' )
_lowercase = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) ,tile_border * 4 ,remove_borders=__A ) ,mode='L' ,)
final_image.paste(
__A ,(crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) ,__A )
@torch.no_grad()
def __call__( self : List[Any] ,__A : Union[str, List[str]] ,__A : Union[PIL.Image.Image, List[PIL.Image.Image]] ,__A : int = 75 ,__A : float = 9.0 ,__A : int = 50 ,__A : Optional[Union[str, List[str]]] = None ,__A : Optional[int] = 1 ,__A : float = 0.0 ,__A : Optional[torch.Generator] = None ,__A : Optional[torch.FloatTensor] = None ,__A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,__A : int = 1 ,__A : int = 128 ,__A : int = 32 ,__A : int = 32 ,) -> int:
_lowercase = Image.new('RGB' ,(image.size[0] * 4, image.size[1] * 4) )
_lowercase = math.ceil(image.size[0] / tile_size )
_lowercase = math.ceil(image.size[1] / tile_size )
_lowercase = tcx * tcy
_lowercase = 0
for y in range(__A ):
for x in range(__A ):
self._process_tile(
__A ,__A ,__A ,__A ,__A ,__A ,__A ,prompt=__A ,num_inference_steps=__A ,guidance_scale=__A ,noise_level=__A ,negative_prompt=__A ,num_images_per_prompt=__A ,eta=__A ,generator=__A ,latents=__A ,)
current_count += 1
if callback is not None:
callback({'progress': current_count / total_tile_count, 'image': final_image} )
return final_image
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
# Run a demo
_lowercase = 'stabilityai/stable-diffusion-x4-upscaler'
_lowercase = StableDiffusionTiledUpscalePipeline.from_pretrained(snake_case__ , revision='fp16' , torch_dtype=torch.floataa )
_lowercase = pipe.to('cuda' )
_lowercase = Image.open('../../docs/source/imgs/diffusers_library.jpg' )
def callback(snake_case__ :int ):
print(F"""progress: {obj["progress"]:.4f}""" )
obj["image"].save('diffusers_library_progress.jpg' )
_lowercase = pipe(image=snake_case__ , prompt='Black font, white background, vector' , noise_level=40 , callback=snake_case__ )
final_image.save('diffusers_library.jpg' )
if __name__ == "__main__":
main() | 67 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : complex ,a__ : str = "x" ,a__ : float = 10**-10 ,a__ : int = 1 ,) -> complex:
__A : Tuple = symbols(a__ )
__A : List[str] = lambdify(a__ ,a__ )
__A : Any = lambdify(a__ ,diff(a__ ,a__ ) )
__A : Dict = starting_point
while True:
if diff_function(a__ ) != 0:
__A : Optional[int] = prev_guess - multiplicity * func(a__ ) / diff_function(
a__ )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
__A : List[Any] = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 17 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
__A = logging.get_logger(__name__)
__A = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
__A = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
__A = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
__A = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
__A = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
__A = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
__A = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
__A = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
__A = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
__A = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
__A = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
__A = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
__A = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
__A = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
__A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
__A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
__A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
__A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
__A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
__A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = FLAX_MODEL_MAPPING
__A = auto_class_update(FlaxAutoModel)
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
__A = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : int = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
__A = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Dict = FLAX_MODEL_FOR_MASKED_LM_MAPPING
__A = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__A = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Optional[int] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__A = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Any = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
__A = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : List[str] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__A = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : List[Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
__A = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Dict = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
__A = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Tuple = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__A = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
__A = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class _A ( _BaseAutoModelClass ):
"""simple docstring"""
lowerCamelCase : List[str] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
__A = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 68 |
from math import sqrt
def __SCREAMING_SNAKE_CASE ( a__ : int = 1000000 ) -> int:
__A : int = 0
__A : int = 0
__A : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(a__ ,sum_shortest_sides // 2 )
- max(1 ,sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
| 17 | 0 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase ( ) -> Dict:
__snake_case = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
__snake_case = Dataset.from_dict(_UpperCAmelCase )
return dataset
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : Any ):
"""simple docstring"""
__snake_case = get_dataset()
__snake_case = make_duplicate_clusters(a_ , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = get_dataset()
__snake_case , __snake_case = deduplicate_dataset(a_ )
self.assertEqual(len(a_ ) , 2 )
print(a_ )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , a_ )
| 69 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase_ ( _lowercase ):
_lowercase : List[str] = ['''pixel_values''']
def __init__( self : Dict , __A : bool = True , __A : Optional[Dict[str, int]] = None , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : bool = True , __A : Dict[str, int] = None , __A : bool = True , __A : Union[int, float] = 1 / 255 , __A : bool = True , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , **__A : int , ):
super().__init__(**__A )
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 256}
__A : Dict = get_size_dict(__A , default_to_square=__A )
__A : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : str = do_resize
__A : Dict = size
__A : Any = resample
__A : Optional[Any] = do_center_crop
__A : List[str] = crop_size
__A : Optional[int] = do_rescale
__A : int = rescale_factor
__A : Union[str, Any] = do_normalize
__A : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__A : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase_ ( self : Optional[Any] , __A : np.ndarray , __A : Dict[str, int] , __A : PILImageResampling = PILImageResampling.BICUBIC , __A : Optional[Union[str, ChannelDimension]] = None , **__A : List[Any] , ):
__A : str = get_size_dict(__A , default_to_square=__A )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__A : Dict = get_resize_output_image_size(__A , size=size["""shortest_edge"""] , default_to_square=__A )
return resize(__A , size=__A , resample=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : np.ndarray , __A : Dict[str, int] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : str , ):
__A : str = get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__A , size=(size["""height"""], size["""width"""]) , data_format=__A , **__A )
def lowerCAmelCase_ ( self : List[str] , __A : np.ndarray , __A : float , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Optional[int] ):
return rescale(__A , scale=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Any , __A : np.ndarray , __A : Union[float, List[float]] , __A : Union[float, List[float]] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Tuple , ):
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : int , __A : ImageInput , __A : Optional[bool] = None , __A : Dict[str, int] = None , __A : PILImageResampling = None , __A : bool = None , __A : Dict[str, int] = None , __A : Optional[bool] = None , __A : Optional[float] = None , __A : Optional[bool] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[str, TensorType]] = None , __A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__A : Optional[int] , ):
__A : List[str] = do_resize if do_resize is not None else self.do_resize
__A : Any = size if size is not None else self.size
__A : Union[str, Any] = get_size_dict(__A , default_to_square=__A )
__A : Tuple = resample if resample is not None else self.resample
__A : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
__A : List[Any] = crop_size if crop_size is not None else self.crop_size
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : Tuple = do_rescale if do_rescale is not None else self.do_rescale
__A : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__A : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
__A : Optional[int] = image_mean if image_mean is not None else self.image_mean
__A : List[str] = image_std if image_std is not None else self.image_std
__A : Union[str, Any] = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__A : Union[str, Any] = [to_numpy_array(__A ) for image in images]
if do_resize:
__A : int = [self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_center_crop:
__A : Optional[Any] = [self.center_crop(image=__A , size=__A ) for image in images]
if do_rescale:
__A : List[Any] = [self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
__A : Any = [self.normalize(image=__A , mean=__A , std=__A ) for image in images]
__A : int = [to_channel_dimension_format(__A , __A ) for image in images]
__A : Tuple = {"""pixel_values""": images}
return BatchFeature(data=__A , tensor_type=__A )
def lowerCAmelCase_ ( self : int , __A : List[str] , __A : List[Tuple] = None ):
__A : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__A ) != len(__A ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(__A ):
__A : str = target_sizes.numpy()
__A : int = []
for idx in range(len(__A ) ):
__A : Any = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__A )
__A : Union[str, Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__A )
else:
__A : List[str] = logits.argmax(dim=1 )
__A : Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 17 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : int = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Union[str, Any] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Any = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 70 |
class lowerCamelCase_ :
def __init__( self : Dict , __A : Tuple , __A : Optional[int] , __A : int ):
__A : List[str] = name
__A : Optional[int] = value
__A : Optional[Any] = weight
def __repr__( self : Any ):
return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def lowerCAmelCase_ ( self : Union[str, Any] ):
return self.value
def lowerCAmelCase_ ( self : str ):
return self.name
def lowerCAmelCase_ ( self : str ):
return self.weight
def lowerCAmelCase_ ( self : Dict ):
return self.value / self.weight
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : Optional[int] ,a__ : Union[str, Any] ) -> int:
__A : Tuple = []
for i in range(len(a__ ) ):
menu.append(Things(name[i] ,value[i] ,weight[i] ) )
return menu
def __SCREAMING_SNAKE_CASE ( a__ : Tuple ,a__ : Any ,a__ : Optional[int] ) -> Tuple:
__A : Optional[int] = sorted(a__ ,key=a__ ,reverse=a__ )
__A : Optional[Any] = []
__A , __A : Tuple = 0.0, 0.0
for i in range(len(a__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
'''simple docstring'''
from __future__ import annotations
def a__ ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[list[str]] , _SCREAMING_SNAKE_CASE : int , ) -> None:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = len(_SCREAMING_SNAKE_CASE )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(_SCREAMING_SNAKE_CASE ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> None:
"""simple docstring"""
UpperCAmelCase_ : list[list[str]] = []
depth_first_search([] , [] , [] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Print all the boards
for board in boards:
for column in board:
print(_SCREAMING_SNAKE_CASE )
print("" )
print(len(_SCREAMING_SNAKE_CASE ) , "solutions were found." )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 71 |
UpperCAmelCase_ : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_5818,
}
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ,a__ : float ) -> float:
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
__A : Optional[int] = (
f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
f"""Valid values are: {", ".join(a__ )}"""
)
raise ValueError(a__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
'''simple docstring'''
import math
class __magic_name__ :
def _A( self , snake_case_ , snake_case_ ):
lowercase =0.0
lowercase =0.0
for i in range(len(snake_case_ ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
for i in range(len(snake_case_ ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def UpperCamelCase ( ) -> None:
'''simple docstring'''
lowercase =[[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
lowercase =[[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
lowercase =SelfOrganizingMap()
lowercase =3
lowercase =0.5
for _ in range(lowercase_ ):
for j in range(len(lowercase_ ) ):
# training sample
lowercase =training_samples[j]
# Compute the winning vector
lowercase =self_organizing_map.get_winner(lowercase_ , lowercase_ )
# Update the winning vector
lowercase =self_organizing_map.update(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# classify test sample
lowercase =[0, 0, 0, 1]
lowercase =self_organizing_map.get_winner(lowercase_ , lowercase_ )
# results
print(f'Clusters that the test sample belongs to : {winner}' )
print(f'Weights that have been trained : {weights}' )
# running the main() function
if __name__ == "__main__":
main()
| 72 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ : Optional[Any] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 | 0 |
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
print('\nThe shortest path matrix using Floyd Warshall algorithm\n')
for i in range(_UpperCAmelCase):
for j in range(_UpperCAmelCase):
if dist[i][j] != float('inf'):
print(int(dist[i][j]) , end='\t')
else:
print('INF' , end='\t')
print()
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = [[float('inf') for _ in range(_UpperCAmelCase)] for _ in range(_UpperCAmelCase)]
for i in range(_UpperCAmelCase):
for j in range(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(_UpperCAmelCase):
# looping through rows of graph array
for i in range(_UpperCAmelCase):
# looping through columns of graph array
for j in range(_UpperCAmelCase):
if (
dist[i][k] != float('inf')
and dist[k][j] != float('inf')
and dist[i][k] + dist[k][j] < dist[i][j]
):
SCREAMING_SNAKE_CASE = dist[i][k] + dist[k][j]
_print_dist(_UpperCAmelCase , _UpperCAmelCase)
return dist, v
if __name__ == "__main__":
a_ : Dict = int(input('Enter number of vertices: '))
a_ : str = int(input('Enter number of edges: '))
a_ : Tuple = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
a_ : Dict = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
a_ : str = int(input('Enter source:'))
a_ : Any = int(input('Enter destination:'))
a_ : List[Any] = float(input('Enter weight:'))
a_ : str = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 73 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
UpperCAmelCase_ : Optional[Any] = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def __SCREAMING_SNAKE_CASE ( a__ : str=True ) -> List[Any]:
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_lowercase ) )
class lowerCamelCase_ ( _lowercase ):
_lowercase : Optional[int] = None
_lowercase : str = None
def lowerCAmelCase_ ( self : Dict , __A : Optional[int] , __A : Optional[Any] ):
with TemporaryDirectory() as tmp_dir:
__A : List[Any] = dataset_module_factory(__A , cache_dir=__A )
__A : Tuple = import_main_class(dataset_module.module_path , dataset=__A )
__A : DatasetBuilder = builder_cls(
cache_dir=__A , config_name=__A , hash=dataset_module.hash , )
__A : List[Any] = """/""".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=__A ).replace(os.sep , """/""" ),
config.DATASET_INFO_FILENAME,
] )
__A : Union[str, Any] = cached_path(__A , cache_dir=__A )
self.assertTrue(os.path.exists(__A ) )
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( a__ : Dict ) -> Optional[Any]:
__A : Optional[Any] = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
__A : Union[str, Any] = dataset_module_factory("""wikipedia""" ,cache_dir=a__ )
__A : List[Any] = import_main_class(dataset_module.module_path )
__A : DatasetBuilder = builder_cls(
cache_dir=a__ ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
__A : Any = None
builder_instance.download_and_prepare()
__A : Union[str, Any] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( a__ : List[str] ) -> List[str]:
__A : Tuple = dataset_module_factory("""wikipedia""" ,cache_dir=a__ )
__A : str = import_main_class(dataset_module.module_path ,dataset=a__ )
__A : DatasetBuilder = builder_cls(
cache_dir=a__ ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
__A : Optional[int] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(a__ ,a__ )
assert "train" in ds
assert isinstance(ds["""train"""] ,a__ )
assert next(iter(ds["""train"""] ) )
| 17 | 0 |
from __future__ import annotations
import math
import random
from typing import Any
class __UpperCamelCase :
"""simple docstring"""
def __init__( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : list[Any] = []
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : int = 0
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
return self.head == self.tail
def UpperCAmelCase__ ( self : Tuple , _A : Any ):
"""simple docstring"""
self.data.append(_A )
__SCREAMING_SNAKE_CASE : Dict = self.tail + 1
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self.data[self.head]
__SCREAMING_SNAKE_CASE : Dict = self.head + 1
return ret
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
return self.tail - self.head
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
print(self.data )
print('''**************''' )
print(self.data[self.head : self.tail] )
class __UpperCamelCase :
"""simple docstring"""
def __init__( self : Any , _A : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = data
__SCREAMING_SNAKE_CASE : MyNode | None = None
__SCREAMING_SNAKE_CASE : MyNode | None = None
__SCREAMING_SNAKE_CASE : int = 1
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
return self.data
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
return self.left
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
return self.right
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
return self.height
def UpperCAmelCase__ ( self : str , _A : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = data
def UpperCAmelCase__ ( self : Union[str, Any] , _A : MyNode | None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = node
def UpperCAmelCase__ ( self : List[str] , _A : MyNode | None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = node
def UpperCAmelCase__ ( self : Optional[Any] , _A : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = height
def a__ ( snake_case ):
"""simple docstring"""
if node is None:
return 0
return node.get_height()
def a__ ( snake_case , snake_case ):
"""simple docstring"""
if a > b:
return a
return b
def a__ ( snake_case ):
"""simple docstring"""
print('''left rotation node:''' , node.get_data() )
__SCREAMING_SNAKE_CASE : int = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(snake_case )
__SCREAMING_SNAKE_CASE : List[str] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(snake_case )
__SCREAMING_SNAKE_CASE : Optional[Any] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(snake_case )
return ret
def a__ ( snake_case ):
"""simple docstring"""
print('''right rotation node:''' , node.get_data() )
__SCREAMING_SNAKE_CASE : List[Any] = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(snake_case )
__SCREAMING_SNAKE_CASE : List[Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(snake_case )
__SCREAMING_SNAKE_CASE : Optional[int] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(snake_case )
return ret
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = node.get_left()
assert left_child is not None
node.set_left(left_rotation(snake_case ) )
return right_rotation(snake_case )
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = node.get_right()
assert right_child is not None
node.set_right(right_rotation(snake_case ) )
return left_rotation(snake_case )
def a__ ( snake_case , snake_case ):
"""simple docstring"""
if node is None:
return MyNode(snake_case )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , snake_case ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
__SCREAMING_SNAKE_CASE : Optional[int] = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
__SCREAMING_SNAKE_CASE : Optional[Any] = right_rotation(snake_case )
else:
__SCREAMING_SNAKE_CASE : List[str] = lr_rotation(snake_case )
else:
node.set_right(insert_node(node.get_right() , snake_case ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
__SCREAMING_SNAKE_CASE : int = node.get_right()
assert right_child is not None
if data < right_child.get_data():
__SCREAMING_SNAKE_CASE : Any = rl_rotation(snake_case )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = left_rotation(snake_case )
__SCREAMING_SNAKE_CASE : Optional[Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(snake_case )
return node
def a__ ( snake_case ):
"""simple docstring"""
while True:
__SCREAMING_SNAKE_CASE : int = root.get_right()
if right_child is None:
break
__SCREAMING_SNAKE_CASE : Union[str, Any] = right_child
return root.get_data()
def a__ ( snake_case ):
"""simple docstring"""
while True:
__SCREAMING_SNAKE_CASE : List[str] = root.get_left()
if left_child is None:
break
__SCREAMING_SNAKE_CASE : str = left_child
return root.get_data()
def a__ ( snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = root.get_left()
__SCREAMING_SNAKE_CASE : List[Any] = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
__SCREAMING_SNAKE_CASE : List[str] = get_left_most(snake_case )
root.set_data(snake_case )
root.set_right(del_node(snake_case , snake_case ) )
elif left_child is not None:
__SCREAMING_SNAKE_CASE : List[Any] = left_child
elif right_child is not None:
__SCREAMING_SNAKE_CASE : Dict = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print('''No such data''' )
return root
else:
root.set_left(del_node(snake_case , snake_case ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(snake_case , snake_case ) )
if get_height(snake_case ) - get_height(snake_case ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
__SCREAMING_SNAKE_CASE : Dict = left_rotation(snake_case )
else:
__SCREAMING_SNAKE_CASE : Any = rl_rotation(snake_case )
elif get_height(snake_case ) - get_height(snake_case ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
__SCREAMING_SNAKE_CASE : Optional[Any] = right_rotation(snake_case )
else:
__SCREAMING_SNAKE_CASE : Any = lr_rotation(snake_case )
__SCREAMING_SNAKE_CASE : Any = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(snake_case )
return root
class __UpperCamelCase :
"""simple docstring"""
def __init__( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : MyNode | None = None
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
return get_height(self.root )
def UpperCAmelCase__ ( self : str , _A : Any ):
"""simple docstring"""
print('''insert:''' + str(_A ) )
__SCREAMING_SNAKE_CASE : Dict = insert_node(self.root , _A )
def UpperCAmelCase__ ( self : Tuple , _A : Any ):
"""simple docstring"""
print('''delete:''' + str(_A ) )
if self.root is None:
print('''Tree is empty!''' )
return
__SCREAMING_SNAKE_CASE : int = del_node(self.root , _A )
def __str__( self : int , ): # a level traversale, gives a more intuitive look on the tree
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = ''''''
__SCREAMING_SNAKE_CASE : Optional[int] = MyQueue()
q.push(self.root )
__SCREAMING_SNAKE_CASE : Any = self.get_height()
if layer == 0:
return output
__SCREAMING_SNAKE_CASE : Optional[int] = 0
while not q.is_empty():
__SCREAMING_SNAKE_CASE : str = q.pop()
__SCREAMING_SNAKE_CASE : Union[str, Any] = ''' ''' * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(_A )
q.push(_A )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
__SCREAMING_SNAKE_CASE : Optional[int] = cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , _A ) - 1:
__SCREAMING_SNAKE_CASE : Union[str, Any] = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def a__ ( ):
"""simple docstring"""
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
lowercase_ = AVLtree()
lowercase_ = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 74 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : Optional[int] , __A : Union[str, Any] , __A : int=7 , __A : int=3 , __A : int=30 , __A : Dict=400 , __A : str=True , __A : str=None , __A : str=True , __A : Optional[int]=[0.5, 0.5, 0.5] , __A : List[str]=[0.5, 0.5, 0.5] , __A : Optional[Any]=True , __A : int=1 / 255 , __A : List[Any]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
__A : Union[str, Any] = parent
__A : Union[str, Any] = batch_size
__A : Union[str, Any] = num_channels
__A : Optional[Any] = min_resolution
__A : Union[str, Any] = max_resolution
__A : Any = do_resize
__A : Union[str, Any] = size
__A : Optional[int] = do_normalize
__A : Dict = image_mean
__A : Optional[int] = image_std
__A : Tuple = do_rescale
__A : Optional[Any] = rescale_factor
__A : Tuple = do_pad
def lowerCAmelCase_ ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase_ ( self : Optional[Any] , __A : Optional[int] , __A : Dict=False ):
if not batched:
__A : Union[str, Any] = image_inputs[0]
if isinstance(__A , Image.Image ):
__A , __A : Union[str, Any] = image.size
else:
__A , __A : Optional[int] = image.shape[1], image.shape[2]
if w < h:
__A : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
__A : Dict = self.size["""shortest_edge"""]
elif w > h:
__A : Optional[Any] = self.size["""shortest_edge"""]
__A : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
__A : Union[str, Any] = self.size["""shortest_edge"""]
__A : str = self.size["""shortest_edge"""]
else:
__A : Any = []
for image in image_inputs:
__A , __A : List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__A : Tuple = max(__A , key=lambda __A : item[0] )[0]
__A : Union[str, Any] = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase_ ( _lowercase , unittest.TestCase ):
_lowercase : Tuple = DetaImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : Optional[Any] ):
__A : Tuple = DetaImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : List[str] ):
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , """image_mean""" ) )
self.assertTrue(hasattr(__A , """image_std""" ) )
self.assertTrue(hasattr(__A , """do_normalize""" ) )
self.assertTrue(hasattr(__A , """do_resize""" ) )
self.assertTrue(hasattr(__A , """do_rescale""" ) )
self.assertTrue(hasattr(__A , """do_pad""" ) )
self.assertTrue(hasattr(__A , """size""" ) )
def lowerCAmelCase_ ( self : Any ):
__A : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , __A )
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A , __A : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
__A : List[str] = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : int = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Dict ):
# Initialize image_processing
__A : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
__A : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Tuple = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Any = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase_ ( self : Tuple ):
# prepare image and target
__A : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__A : Any = json.loads(f.read() )
__A : int = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__A : List[str] = DetaImageProcessor()
__A : List[str] = image_processing(images=__A , annotations=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Tuple = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : List[str] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify orig_size
__A : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
# prepare image, target and masks_path
__A : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__A : Tuple = json.loads(f.read() )
__A : Optional[int] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__A : Any = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__A : Any = DetaImageProcessor(format="""coco_panoptic""" )
__A : int = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Optional[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : Any = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : Optional[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify masks
__A : List[str] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __A )
# verify orig_size
__A : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
| 17 | 0 |
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
UpperCamelCase__ = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
UpperCamelCase__ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def a__ ( lowerCAmelCase__ , lowerCAmelCase__=False ) -> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = create_model(
'''HTSAT-tiny''' , '''roberta''' , lowerCAmelCase__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=lowerCAmelCase__ , fusion_type='''aff_2d''' if enable_fusion else None , )
return model, model_cfg
def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
UpperCAmelCase__ : int = {}
UpperCAmelCase__ : Any = R'''.*sequential.(\d+).*'''
UpperCAmelCase__ : Optional[int] = R'''.*_projection.(\d+).*'''
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
UpperCAmelCase__ : List[str] = key.replace(lowerCAmelCase__ , lowerCAmelCase__ )
if re.match(lowerCAmelCase__ , lowerCAmelCase__ ):
# replace sequential layers with list
UpperCAmelCase__ : Any = re.match(lowerCAmelCase__ , lowerCAmelCase__ ).group(1 )
UpperCAmelCase__ : Any = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(lowerCAmelCase__ )//3}.linear.""" )
elif re.match(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase__ : Union[str, Any] = int(re.match(lowerCAmelCase__ , lowerCAmelCase__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
UpperCAmelCase__ : Dict = 1 if projecton_layer == 0 else 2
UpperCAmelCase__ : Union[str, Any] = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
UpperCAmelCase__ : Dict = value
UpperCAmelCase__ : List[Any] = mixed_qkv.size(0 ) // 3
UpperCAmelCase__ : str = mixed_qkv[:qkv_dim]
UpperCAmelCase__ : Dict = mixed_qkv[qkv_dim : qkv_dim * 2]
UpperCAmelCase__ : Union[str, Any] = mixed_qkv[qkv_dim * 2 :]
UpperCAmelCase__ : str = query_layer
UpperCAmelCase__ : Tuple = key_layer
UpperCAmelCase__ : int = value_layer
else:
UpperCAmelCase__ : List[Any] = value
return model_state_dict
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> Optional[int]:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = init_clap(lowerCAmelCase__ , enable_fusion=lowerCAmelCase__ )
clap_model.eval()
UpperCAmelCase__ : Union[str, Any] = clap_model.state_dict()
UpperCAmelCase__ : Optional[int] = rename_state_dict(lowerCAmelCase__ )
UpperCAmelCase__ : Tuple = ClapConfig()
UpperCAmelCase__ : Tuple = enable_fusion
UpperCAmelCase__ : Union[str, Any] = ClapModel(lowerCAmelCase__ )
# ignore the spectrogram embedding layer
model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
transformers_config.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
UpperCamelCase__ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 75 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
__A : List[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=a__ ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=a__ ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=a__ )
return parser.parse_args()
def __SCREAMING_SNAKE_CASE ( ) -> str:
__A : Union[str, Any] = parse_args()
# Import training_script as a module.
__A : List[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__A : str = script_fpath.stem
__A : int = importlib.import_module(a__ )
# Patch sys.argv
__A : List[str] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 17 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> str:
__lowercase : Any = '''hf-internal-testing/tiny-random-t5'''
__lowercase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ )
__lowercase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase_ )
__lowercase : Tuple = tokenizer('''This is me''' , return_tensors='''pt''' )
__lowercase : List[str] = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
__lowercase : Dict = model.generate(**UpperCamelCase_ )
__lowercase : int = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase_ )
__lowercase : str = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase_ )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
__lowercase : List[str] = model_reloaded.generate(**UpperCamelCase_ )
self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ ) )
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : str = '''hf-internal-testing/tiny-random-t5'''
__lowercase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase_ )
__lowercase : Any = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(UpperCamelCase_ ):
model.save_pretrained(UpperCamelCase_ )
__lowercase : Tuple = model.reverse_bettertransformer()
model.save_pretrained(UpperCamelCase_ )
| 76 |
from collections.abc import Sequence
def __SCREAMING_SNAKE_CASE ( a__ : Sequence[float] ,a__ : float ) -> float:
return sum(c * (x**i) for i, c in enumerate(a__ ) )
def __SCREAMING_SNAKE_CASE ( a__ : Sequence[float] ,a__ : float ) -> float:
__A : Any = 0.0
for coeff in reversed(a__ ):
__A : List[str] = result * x + coeff
return result
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCAmelCase_ : str = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 17 | 0 |
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase = 0 ) -> list:
"""simple docstring"""
__UpperCAmelCase : List[str] = length or len(UpperCamelCase )
__UpperCAmelCase : List[str] = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
__UpperCAmelCase , __UpperCAmelCase : int = list_data[i + 1], list_data[i]
__UpperCAmelCase : List[Any] = True
return list_data if not swapped else bubble_sort(UpperCamelCase , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 77 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCamelCase_ ( _lowercase ):
_lowercase : Union[str, Any] = '''EncodecFeatureExtractor'''
_lowercase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self : List[Any] , __A : Any , __A : Tuple ):
super().__init__(__A , __A )
__A : Dict = self.feature_extractor
__A : List[str] = False
def lowerCAmelCase_ ( self : Union[str, Any] , __A : str=None , __A : Tuple=None , __A : Dict=True ):
return self.tokenizer.get_decoder_prompt_ids(task=__A , language=__A , no_timestamps=__A )
def __call__( self : Optional[Any] , *__A : Tuple , **__A : Tuple ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__A , **__A )
__A : str = kwargs.pop("""audio""" , __A )
__A : Optional[Any] = kwargs.pop("""sampling_rate""" , __A )
__A : int = kwargs.pop("""text""" , __A )
if len(__A ) > 0:
__A : int = args[0]
__A : Dict = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if text is not None:
__A : Dict = self.tokenizer(__A , **__A )
if audio is not None:
__A : Optional[int] = self.feature_extractor(__A , *__A , sampling_rate=__A , **__A )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__A : List[Any] = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
__A : int = audio_inputs["""padding_mask"""]
return inputs
def lowerCAmelCase_ ( self : List[str] , *__A : int , **__A : Tuple ):
__A : Optional[int] = kwargs.pop("""audio""" , __A )
__A : List[str] = kwargs.pop("""padding_mask""" , __A )
if len(__A ) > 0:
__A : Dict = args[0]
__A : Optional[int] = args[1:]
if audio_values is not None:
return self._decode_audio(__A , padding_mask=__A )
else:
return self.tokenizer.batch_decode(*__A , **__A )
def lowerCAmelCase_ ( self : Optional[Any] , *__A : Dict , **__A : Any ):
return self.tokenizer.decode(*__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : Union[str, Any] , __A : Optional = None ):
__A : List[str] = to_numpy(__A )
__A , __A , __A : Tuple = audio_values.shape
if padding_mask is None:
return list(__A )
__A : Union[str, Any] = to_numpy(__A )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__A : List[str] = seq_len - padding_mask.shape[-1]
__A : Tuple = 1 - self.feature_extractor.padding_value
__A : Optional[int] = np.pad(__A , ((0, 0), (0, difference)) , """constant""" , constant_values=__A )
__A : int = audio_values.tolist()
for i in range(__A ):
__A : str = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__A : List[Any] = sliced_audio.reshape(__A , -1 )
return audio_values
| 17 | 0 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
SCREAMING_SNAKE_CASE_: Union[str, Any] ='\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
SCREAMING_SNAKE_CASE_: str ='\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
SCREAMING_SNAKE_CASE_: Tuple ='\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
SCREAMING_SNAKE_CASE_: Optional[int] ='\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
SCREAMING_SNAKE_CASE_: Optional[Any] ='The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def _lowercase (self : str ):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
def _lowercase (self : Optional[Any] , __a : int , __a : Union[str, Any] , __a : str=[1, 10, 100] , __a : Optional[int]=4 , __a : List[Any]=3.0 ):
if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("This metric is currently not supported on Windows." )
with ThreadPoolExecutor(max_workers=__a ) as executor:
UpperCAmelCase_ = []
UpperCAmelCase_ = Counter()
UpperCAmelCase_ = 0
UpperCAmelCase_ = defaultdict(__a )
for task_id, (candidates, test_case) in enumerate(zip(__a , __a ) ):
for candidate in candidates:
UpperCAmelCase_ = candidate + "\n" + test_case
UpperCAmelCase_ = (test_program, timeout, task_id, completion_id[task_id])
UpperCAmelCase_ = executor.submit(__a , *__a )
futures.append(__a )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(__a ):
UpperCAmelCase_ = future.result()
results[result["task_id"]].append((result["completion_id"], result) )
UpperCAmelCase_ , UpperCAmelCase_ = [], []
for result in results.values():
result.sort()
UpperCAmelCase_ = [r[1]["passed"] for r in result]
total.append(len(__a ) )
correct.append(sum(__a ) )
UpperCAmelCase_ = np.array(__a )
UpperCAmelCase_ = np.array(__a )
UpperCAmelCase_ = k
UpperCAmelCase_ = {f"""pass@{k}""": estimate_pass_at_k(__a , __a , __a ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : Optional[int] ) -> Tuple:
'''simple docstring'''
def estimator(snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(snake_case_ , snake_case_ ):
UpperCAmelCase_ = itertools.repeat(snake_case_ , len(snake_case_ ) )
else:
assert len(snake_case_ ) == len(snake_case_ )
UpperCAmelCase_ = iter(snake_case_ )
return np.array([estimator(int(snake_case_ ) , int(snake_case_ ) , snake_case_ ) for n, c in zip(snake_case_ , snake_case_ )] )
| 78 |
def __SCREAMING_SNAKE_CASE ( a__ : int ) -> int:
if not isinstance(a__ ,a__ ):
raise TypeError("""Input value must be an 'int' type""" )
__A : Union[str, Any] = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ):
return f"gaussian_noise_s={seed}_shape={'_'.join([str(_lowerCAmelCase ) for s in shape] )}.npy"
def __UpperCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __UpperCAmelCase ( self , _lowerCAmelCase=0 , _lowerCAmelCase=(4, 4, 64, 64) , _lowerCAmelCase=False ):
UpperCAmelCase__ : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
UpperCAmelCase__ : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(_lowerCAmelCase , _lowerCAmelCase ) ) , dtype=_lowerCAmelCase )
return image
def __UpperCAmelCase ( self , _lowerCAmelCase=False , _lowerCAmelCase="CompVis/stable-diffusion-v1-4" ):
UpperCAmelCase__ : int = jnp.bfloataa if fpaa else jnp.floataa
UpperCAmelCase__ : Optional[Any] = """bf16""" if fpaa else None
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = FlaxUNetaDConditionModel.from_pretrained(
_lowerCAmelCase , subfolder="""unet""" , dtype=_lowerCAmelCase , revision=_lowerCAmelCase )
return model, params
def __UpperCAmelCase ( self , _lowerCAmelCase=0 , _lowerCAmelCase=(4, 77, 768) , _lowerCAmelCase=False ):
UpperCAmelCase__ : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
UpperCAmelCase__ : Optional[int] = jnp.array(load_hf_numpy(self.get_file_format(_lowerCAmelCase , _lowerCAmelCase ) ) , dtype=_lowerCAmelCase )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1000, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""" , fpaa=_lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = self.get_latents(_lowerCAmelCase , fpaa=_lowerCAmelCase )
UpperCAmelCase__ : Dict = self.get_encoder_hidden_states(_lowerCAmelCase , fpaa=_lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = model.apply(
{"""params""": params} , _lowerCAmelCase , jnp.array(_lowerCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=_lowerCAmelCase , ).sample
assert sample.shape == latents.shape
UpperCAmelCase__ : Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
UpperCAmelCase__ : List[Any] = jnp.array(_lowerCAmelCase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[17, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1000, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""" , fpaa=_lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = self.get_latents(_lowerCAmelCase , shape=(4, 4, 96, 96) , fpaa=_lowerCAmelCase )
UpperCAmelCase__ : Any = self.get_encoder_hidden_states(_lowerCAmelCase , shape=(4, 77, 1024) , fpaa=_lowerCAmelCase )
UpperCAmelCase__ : Dict = model.apply(
{"""params""": params} , _lowerCAmelCase , jnp.array(_lowerCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=_lowerCAmelCase , ).sample
assert sample.shape == latents.shape
UpperCAmelCase__ : Any = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
UpperCAmelCase__ : Any = jnp.array(_lowerCAmelCase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-2 )
| 79 |
UpperCAmelCase_ : dict[tuple[int, int, int], int] = {}
def __SCREAMING_SNAKE_CASE ( a__ : int ,a__ : int ,a__ : int ) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
__A : List[Any] = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
__A : Dict = _calculate(days - 1 ,a__ ,late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
__A : List[str] = _calculate(days - 1 ,absent + 1 ,0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
__A : int = _calculate(days - 1 ,a__ ,0 )
__A : Optional[int] = state_late + state_absent + state_ontime
__A : Tuple = prizestrings
return prizestrings
def __SCREAMING_SNAKE_CASE ( a__ : int = 30 ) -> int:
return _calculate(a__ ,absent=0 ,late=0 )
if __name__ == "__main__":
print(solution())
| 17 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__UpperCamelCase : Tuple = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def snake_case ( lowerCamelCase ):
'''simple docstring'''
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowerCamelCase )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
__lowercase = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(lowerCamelCase , id=lowerCamelCase )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if exitstatus == 5:
__lowercase = 0
# Doctest custom flag to ignore output.
__UpperCamelCase : Optional[Any] = doctest.register_optionflag("""IGNORE_RESULT""")
__UpperCamelCase : List[Any] = doctest.OutputChecker
class __UpperCamelCase ( _lowerCAmelCase ):
def _a ( self : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__UpperCamelCase : Optional[int] = CustomOutputChecker
__UpperCamelCase : Optional[Any] = HfDoctestModule
__UpperCamelCase : Any = HfDocTestParser
| 80 |
class lowerCamelCase_ :
def __init__( self : Dict , __A : int , __A : Tuple , __A : List[Any] ):
__A : Optional[int] = None
__A : Any = None
__A : int = graph
self._normalize_graph(__A , __A )
__A : str = len(__A )
__A : Optional[int] = None
def lowerCAmelCase_ ( self : int , __A : Any , __A : Optional[Any] ):
if sources is int:
__A : Dict = [sources]
if sinks is int:
__A : Optional[int] = [sinks]
if len(__A ) == 0 or len(__A ) == 0:
return
__A : str = sources[0]
__A : Dict = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(__A ) > 1 or len(__A ) > 1:
__A : Optional[Any] = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
__A : List[Any] = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
__A : str = max_input_flow
__A : Union[str, Any] = 0
__A : Any = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
__A : int = max_input_flow
__A : Optional[Any] = size - 1
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def lowerCAmelCase_ ( self : Optional[Any] , __A : Dict ):
__A : Dict = algorithm(self )
class lowerCamelCase_ :
def __init__( self : Union[str, Any] , __A : str ):
__A : Any = flow_network
__A : int = flow_network.verticesCount
__A : List[Any] = flow_network.sourceIndex
__A : Union[str, Any] = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
__A : Optional[int] = flow_network.graph
__A : str = False
def lowerCAmelCase_ ( self : List[Any] ):
if not self.executed:
self._algorithm()
__A : Any = True
def lowerCAmelCase_ ( self : List[str] ):
pass
class lowerCamelCase_ ( _lowercase ):
def __init__( self : Any , __A : List[str] ):
super().__init__(__A )
# use this to save your result
__A : str = -1
def lowerCAmelCase_ ( self : Any ):
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class lowerCamelCase_ ( _lowercase ):
def __init__( self : List[Any] , __A : Dict ):
super().__init__(__A )
__A : Tuple = [[0] * self.verticies_count for i in range(self.verticies_count )]
__A : Optional[Any] = [0] * self.verticies_count
__A : Union[str, Any] = [0] * self.verticies_count
def lowerCAmelCase_ ( self : int ):
__A : Optional[int] = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
__A : List[str] = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
__A : Dict = 0
while i < len(__A ):
__A : List[Any] = vertices_list[i]
__A : Optional[Any] = self.heights[vertex_index]
self.process_vertex(__A )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(__A ) )
__A : Any = 0
else:
i += 1
__A : Optional[int] = sum(self.preflow[self.source_index] )
def lowerCAmelCase_ ( self : Optional[Any] , __A : str ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__A , __A )
self.relabel(__A )
def lowerCAmelCase_ ( self : Dict , __A : List[str] , __A : Optional[Any] ):
__A : Union[str, Any] = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def lowerCAmelCase_ ( self : Optional[Any] , __A : Tuple ):
__A : Tuple = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
__A : Dict = self.heights[to_index]
if min_height is not None:
__A : Optional[int] = min_height + 1
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = [0]
UpperCAmelCase_ : Dict = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
UpperCAmelCase_ : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
UpperCAmelCase_ : str = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
UpperCAmelCase_ : int = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 17 | 0 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_snake_case : str = argparse.ArgumentParser(
description=(
"Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="bert", choices=["bert"])
parser.add_argument("--model_name", default="bert-base-uncased", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
_snake_case : Dict = parser.parse_args()
if args.model_type == "bert":
_snake_case : Dict = BertForMaskedLM.from_pretrained(args.model_name)
_snake_case : List[Any] = "bert"
else:
raise ValueError("args.model_type should be \"bert\".")
_snake_case : List[str] = model.state_dict()
_snake_case : List[Any] = {}
for w in ["word_embeddings", "position_embeddings"]:
_snake_case : Union[str, Any] = state_dict[f'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
_snake_case : Dict = state_dict[f'''{prefix}.embeddings.LayerNorm.{w}''']
_snake_case : str = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
_snake_case : Optional[Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
_snake_case : int = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
_snake_case : Optional[Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
_snake_case : int = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
_snake_case : Any = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
_snake_case : List[str] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
_snake_case : Union[str, Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
_snake_case : Tuple = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
_snake_case : Dict = state_dict["cls.predictions.decoder.weight"]
_snake_case : Tuple = state_dict["cls.predictions.bias"]
if args.vocab_transform:
for w in ["weight", "bias"]:
_snake_case : Dict = state_dict[f'''cls.predictions.transform.dense.{w}''']
_snake_case : int = state_dict[f'''cls.predictions.transform.LayerNorm.{w}''']
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 81 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ) -> str | Literal[False]:
__A : Tuple = list(a__ )
__A : Optional[int] = list(a__ )
__A : int = 0
for i in range(len(a__ ) ):
if lista[i] != lista[i]:
count += 1
__A : int = """_"""
if count > 1:
return False
else:
return "".join(a__ )
def __SCREAMING_SNAKE_CASE ( a__ : list[str] ) -> list[str]:
__A : Optional[Any] = []
while True:
__A : Tuple = ["""$"""] * len(a__ )
__A : Union[str, Any] = []
for i in range(len(a__ ) ):
for j in range(i + 1 ,len(a__ ) ):
__A : int = compare_string(binary[i] ,binary[j] )
if k is False:
__A : List[str] = """*"""
__A : Any = """*"""
temp.append("""X""" )
for i in range(len(a__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(a__ ) == 0:
return pi
__A : Optional[Any] = list(set(a__ ) )
def __SCREAMING_SNAKE_CASE ( a__ : int ,a__ : Sequence[float] ) -> list[str]:
__A : List[str] = []
for minterm in minterms:
__A : List[Any] = """"""
for _ in range(a__ ):
__A : Union[str, Any] = str(minterm % 2 ) + string
minterm //= 2
temp.append(a__ )
return temp
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ,a__ : int ) -> bool:
__A : Optional[Any] = list(a__ )
__A : Tuple = list(a__ )
__A : Any = 0
for i in range(len(a__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def __SCREAMING_SNAKE_CASE ( a__ : list[list[int]] ,a__ : list[str] ) -> list[str]:
__A : Optional[int] = []
__A : Tuple = [0] * len(a__ )
for i in range(len(chart[0] ) ):
__A : str = 0
__A : Any = -1
for j in range(len(a__ ) ):
if chart[j][i] == 1:
count += 1
__A : Optional[Any] = j
if count == 1:
__A : int = 1
for i in range(len(a__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(a__ ) ):
__A : List[str] = 0
temp.append(prime_implicants[i] )
while True:
__A : Optional[Any] = 0
__A : Any = -1
__A : int = 0
for i in range(len(a__ ) ):
__A : List[Any] = chart[i].count(1 )
if count_n > max_n:
__A : Dict = count_n
__A : Tuple = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(a__ ) ):
__A : Union[str, Any] = 0
def __SCREAMING_SNAKE_CASE ( a__ : list[str] ,a__ : list[str] ) -> list[list[int]]:
__A : Any = [[0 for x in range(len(a__ ) )] for x in range(len(a__ ) )]
for i in range(len(a__ ) ):
__A : List[Any] = prime_implicants[i].count("""_""" )
for j in range(len(a__ ) ):
if is_for_table(prime_implicants[i] ,binary[j] ,a__ ):
__A : Union[str, Any] = 1
return chart
def __SCREAMING_SNAKE_CASE ( ) -> None:
__A : Any = int(input("""Enter the no. of variables\n""" ) )
__A : List[str] = [
float(a__ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
__A : Dict = decimal_to_binary(a__ ,a__ )
__A : Union[str, Any] = check(a__ )
print("""Prime Implicants are:""" )
print(a__ )
__A : Optional[Any] = prime_implicant_chart(a__ ,a__ )
__A : Any = selection(a__ ,a__ )
print("""Essential Prime Implicants are:""" )
print(a__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 17 | 0 |
"""simple docstring"""
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float(moles / volume ) * nfactor )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( a__ : List[str] ,a__ : Dict ,a__ : Union[str, Any] ,a__ : Any ) -> Optional[int]: # noqa: E741
while r - l > 1:
__A : Any = (l + r) // 2
if v[m] >= key:
__A : Optional[int] = m
else:
__A : List[Any] = m # noqa: E741
return r
def __SCREAMING_SNAKE_CASE ( a__ : list[int] ) -> int:
if len(a__ ) == 0:
return 0
__A : str = [0] * len(a__ )
__A : List[str] = 1
__A : List[Any] = v[0]
for i in range(1 ,len(a__ ) ):
if v[i] < tail[0]:
__A : int = v[i]
elif v[i] > tail[length - 1]:
__A : Union[str, Any] = v[i]
length += 1
else:
__A : Any = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCAmelCase__ = False
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''ybelkada/fonts'''
def snake_case_ ( ):
'''simple docstring'''
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def snake_case_ ( A_ : Tuple, A_ : List[Any], A_ : Union[str, Any] ):
'''simple docstring'''
requires_backends(A_, ['''torch'''] )
_check_torch_version()
_lowerCamelCase : Optional[Any] = image_tensor.unsqueeze(0 )
_lowerCamelCase : Optional[Any] = torch.nn.functional.unfold(A_, (patch_height, patch_width), stride=(patch_height, patch_width) )
_lowerCamelCase : Tuple = patches.reshape(image_tensor.size(0 ), image_tensor.size(1 ), A_, A_, -1 )
_lowerCamelCase : str = patches.permute(0, 4, 2, 3, 1 ).reshape(
image_tensor.size(2 ) // patch_height, image_tensor.size(3 ) // patch_width, image_tensor.size(1 ) * patch_height * patch_width, )
return patches.unsqueeze(0 )
def snake_case_ ( A_ : str, A_ : int = 36, A_ : str = "black", A_ : str = "white", A_ : int = 5, A_ : int = 5, A_ : int = 5, A_ : int = 5, A_ : Optional[bytes] = None, A_ : Optional[str] = None, ):
'''simple docstring'''
requires_backends(A_, '''vision''' )
# Add new lines so that each line is no more than 80 characters.
_lowerCamelCase : Optional[Any] = textwrap.TextWrapper(width=80 )
_lowerCamelCase : int = wrapper.wrap(text=A_ )
_lowerCamelCase : List[Any] = '''\n'''.join(A_ )
if font_bytes is not None and font_path is None:
_lowerCamelCase : Optional[int] = io.BytesIO(A_ )
elif font_path is not None:
_lowerCamelCase : Union[str, Any] = font_path
else:
_lowerCamelCase : str = hf_hub_download(A_, '''Arial.TTF''' )
_lowerCamelCase : Optional[Any] = ImageFont.truetype(A_, encoding='''UTF-8''', size=A_ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
_lowerCamelCase : Union[str, Any] = ImageDraw.Draw(Image.new('''RGB''', (1, 1), A_ ) )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[Any] = temp_draw.textbbox((0, 0), A_, A_ )
# Create the actual image with a bit of padding around the text.
_lowerCamelCase : int = text_width + left_padding + right_padding
_lowerCamelCase : Dict = text_height + top_padding + bottom_padding
_lowerCamelCase : int = Image.new('''RGB''', (image_width, image_height), A_ )
_lowerCamelCase : Optional[Any] = ImageDraw.Draw(A_ )
draw.text(xy=(left_padding, top_padding), text=A_, fill=A_, font=A_ )
return image
def snake_case_ ( A_ : np.ndarray, A_ : str, **A_ : Optional[int] ):
'''simple docstring'''
requires_backends(A_, '''vision''' )
# Convert to PIL image if necessary
_lowerCamelCase : List[str] = to_pil_image(A_ )
_lowerCamelCase : Union[str, Any] = render_text(A_, **A_ )
_lowerCamelCase : int = max(header_image.width, image.width )
_lowerCamelCase : List[Any] = int(image.height * (new_width / image.width) )
_lowerCamelCase : Dict = int(header_image.height * (new_width / header_image.width) )
_lowerCamelCase : List[Any] = Image.new('''RGB''', (new_width, new_height + new_header_height), '''white''' )
new_image.paste(header_image.resize((new_width, new_header_height) ), (0, 0) )
new_image.paste(image.resize((new_width, new_height) ), (0, new_header_height) )
# Convert back to the original framework if necessary
_lowerCamelCase : Optional[int] = to_numpy_array(A_ )
if infer_channel_dimension_format(A_ ) == ChannelDimension.LAST:
_lowerCamelCase : int = to_channel_dimension_format(A_, ChannelDimension.LAST )
return new_image
class __snake_case ( _lowercase):
snake_case__ : str = ["flattened_patches"]
def __init__( self : str , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : int = 2_0_4_8 , __lowerCAmelCase : bool = False , **__lowerCAmelCase : Optional[int] , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6}
_lowerCamelCase : List[str] = do_normalize
_lowerCamelCase : Dict = do_convert_rgb
_lowerCamelCase : Union[str, Any] = max_patches
_lowerCamelCase : Any = is_vqa
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : int , __lowerCAmelCase : dict , **__lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self.extract_flattened_patches , '''torch''' )
_check_torch_version()
# convert to torch
_lowerCamelCase : Any = to_channel_dimension_format(__lowerCAmelCase , ChannelDimension.FIRST )
_lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = patch_size['''height'''], patch_size['''width''']
_lowerCamelCase , _lowerCamelCase : Dict = get_image_size(__lowerCAmelCase )
# maximize scale s.t.
_lowerCamelCase : Any = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
_lowerCamelCase : str = max(min(math.floor(scale * image_height / patch_height ) , __lowerCAmelCase ) , 1 )
_lowerCamelCase : Optional[int] = max(min(math.floor(scale * image_width / patch_width ) , __lowerCAmelCase ) , 1 )
_lowerCamelCase : List[Any] = max(num_feasible_rows * patch_height , 1 )
_lowerCamelCase : str = max(num_feasible_cols * patch_width , 1 )
_lowerCamelCase : Optional[Any] = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='''bilinear''' , align_corners=__lowerCAmelCase , antialias=__lowerCAmelCase , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
_lowerCamelCase : int = torch_extract_patches(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Optional[int] = patches.shape
_lowerCamelCase : int = patches_shape[1]
_lowerCamelCase : Union[str, Any] = patches_shape[2]
_lowerCamelCase : Optional[int] = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
_lowerCamelCase : List[Any] = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
_lowerCamelCase : List[Any] = torch.arange(__lowerCAmelCase ).reshape([rows, 1] ).repeat(1 , __lowerCAmelCase ).reshape([rows * columns, 1] )
_lowerCamelCase : Any = torch.arange(__lowerCAmelCase ).reshape([1, columns] ).repeat(__lowerCAmelCase , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
_lowerCamelCase : int = row_ids.to(torch.floataa )
_lowerCamelCase : str = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
_lowerCamelCase : Any = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
_lowerCamelCase : Any = torch.nn.functional.pad(__lowerCAmelCase , [0, 0, 0, max_patches - (rows * columns)] ).float()
_lowerCamelCase : Any = to_numpy_array(__lowerCAmelCase )
return result
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Dict ):
"""simple docstring"""
if image.dtype == np.uinta:
_lowerCamelCase : Tuple = image.astype(np.floataa )
# take mean across the whole `image`
_lowerCamelCase : List[str] = np.mean(__lowerCAmelCase )
_lowerCamelCase : Dict = np.std(__lowerCAmelCase )
_lowerCamelCase : Tuple = max(__lowerCAmelCase , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : ImageInput , __lowerCAmelCase : Optional[str] = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[Dict[str, int]] = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCAmelCase : Any , ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase : Tuple = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_lowerCamelCase : str = patch_size if patch_size is not None else self.patch_size
_lowerCamelCase : List[Any] = max_patches if max_patches is not None else self.max_patches
_lowerCamelCase : Any = self.is_vqa
if kwargs.get('''data_format''' , __lowerCAmelCase ) is not None:
raise ValueError('''data_format is not an accepted input as the outputs are ''' )
_lowerCamelCase : Tuple = make_list_of_images(__lowerCAmelCase )
if not valid_images(__lowerCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_lowerCamelCase : Optional[Any] = [convert_to_rgb(__lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
_lowerCamelCase : Optional[Any] = [to_numpy_array(__lowerCAmelCase ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('''A header text must be provided for VQA models.''' )
_lowerCamelCase : str = kwargs.pop('''font_bytes''' , __lowerCAmelCase )
_lowerCamelCase : int = kwargs.pop('''font_path''' , __lowerCAmelCase )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_lowerCamelCase : Optional[Any] = [header_text] * len(__lowerCAmelCase )
_lowerCamelCase : Dict = [
render_header(__lowerCAmelCase , header_text[i] , font_bytes=__lowerCAmelCase , font_path=__lowerCAmelCase )
for i, image in enumerate(__lowerCAmelCase )
]
if do_normalize:
_lowerCamelCase : Dict = [self.normalize(image=__lowerCAmelCase ) for image in images]
# convert to torch tensor and permute
_lowerCamelCase : Optional[int] = [
self.extract_flattened_patches(image=__lowerCAmelCase , max_patches=__lowerCAmelCase , patch_size=__lowerCAmelCase )
for image in images
]
# create attention mask in numpy
_lowerCamelCase : Union[str, Any] = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
_lowerCamelCase : str = BatchFeature(
data={'''flattened_patches''': images, '''attention_mask''': attention_masks} , tensor_type=__lowerCAmelCase )
return encoded_outputs
| 83 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 17 | 0 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class A_ ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self , snake_case , snake_case , snake_case , snake_case , ):
super().__init__()
lowercase = value_function
lowercase = unet
lowercase = scheduler
lowercase = env
lowercase = env.get_dataset()
lowercase = {}
for key in self.data.keys():
try:
lowercase = self.data[key].mean()
except: # noqa: E722
pass
lowercase = {}
for key in self.data.keys():
try:
lowercase = self.data[key].std()
except: # noqa: E722
pass
lowercase = env.observation_space.shape[0]
lowercase = env.action_space.shape[0]
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
return (x_in - self.means[key]) / self.stds[key]
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
return x_in * self.stds[key] + self.means[key]
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if type(snake_case ) is dict:
return {k: self.to_torch(snake_case ) for k, v in x_in.items()}
elif torch.is_tensor(snake_case ):
return x_in.to(self.unet.device )
return torch.tensor(snake_case , device=self.unet.device )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
for key, val in cond.items():
lowercase = val.clone()
return x_in
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case ):
lowercase = x.shape[0]
lowercase = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowercase = torch.full((batch_size,) , snake_case , device=self.unet.device , dtype=torch.long )
for _ in range(snake_case ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowercase = self.value_function(x.permute(0 , 2 , 1 ) , snake_case ).sample
lowercase = torch.autograd.grad([y.sum()] , [x] )[0]
lowercase = self.scheduler._get_variance(snake_case )
lowercase = torch.exp(0.5 * posterior_variance )
lowercase = model_std * grad
lowercase = 0
lowercase = x.detach()
lowercase = x + scale * grad
lowercase = self.reset_xa(snake_case , snake_case , self.action_dim )
lowercase = self.unet(x.permute(0 , 2 , 1 ) , snake_case ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
lowercase = self.scheduler.step(snake_case , snake_case , snake_case , predict_epsilon=snake_case )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowercase = self.reset_xa(snake_case , snake_case , self.action_dim )
lowercase = self.to_torch(snake_case )
return x, y
def __call__( self , snake_case , snake_case=64 , snake_case=32 , snake_case=2 , snake_case=0.1 ):
# normalize the observations and create batch dimension
lowercase = self.normalize(snake_case , 'observations' )
lowercase = obs[None].repeat(snake_case , axis=0 )
lowercase = {0: self.to_torch(snake_case )}
lowercase = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowercase = randn_tensor(snake_case , device=self.unet.device )
lowercase = self.reset_xa(snake_case , snake_case , self.action_dim )
lowercase = self.to_torch(snake_case )
# run the diffusion process
lowercase , lowercase = self.run_diffusion(snake_case , snake_case , snake_case , snake_case )
# sort output trajectories by value
lowercase = y.argsort(0 , descending=snake_case ).squeeze()
lowercase = x[sorted_idx]
lowercase = sorted_values[:, :, : self.action_dim]
lowercase = actions.detach().cpu().numpy()
lowercase = self.de_normalize(snake_case , key='actions' )
# select the action with the highest value
if y is not None:
lowercase = 0
else:
# if we didn't run value guiding, select a random action
lowercase = np.random.randint(0 , snake_case )
lowercase = denorm_actions[selected_index, 0]
return denorm_actions
| 84 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCamelCase_ ( _lowercase ):
_lowercase : Dict = '''unispeech'''
def __init__( self : str , __A : Tuple=32 , __A : List[str]=768 , __A : Dict=12 , __A : Union[str, Any]=12 , __A : Tuple=3072 , __A : Any="gelu" , __A : int=0.1 , __A : Optional[int]=0.1 , __A : List[Any]=0.1 , __A : Any=0.0 , __A : List[str]=0.0 , __A : int=0.1 , __A : List[Any]=0.1 , __A : List[str]=0.0_2 , __A : List[str]=1e-5 , __A : List[Any]="group" , __A : int="gelu" , __A : Any=(512, 512, 512, 512, 512, 512, 512) , __A : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , __A : Tuple=(10, 3, 3, 3, 3, 2, 2) , __A : Optional[int]=False , __A : Any=128 , __A : Union[str, Any]=16 , __A : Optional[Any]=False , __A : str=True , __A : Dict=0.0_5 , __A : Optional[Any]=10 , __A : Dict=2 , __A : int=0.0 , __A : List[str]=10 , __A : str=0 , __A : List[str]=320 , __A : List[Any]=2 , __A : Tuple=0.1 , __A : Optional[int]=100 , __A : Any=256 , __A : Dict=256 , __A : Tuple=0.1 , __A : List[str]="mean" , __A : int=False , __A : List[str]=False , __A : List[Any]=256 , __A : str=80 , __A : Tuple=0 , __A : Tuple=1 , __A : int=2 , __A : Dict=0.5 , **__A : List[Any] , ):
super().__init__(**__A , pad_token_id=__A , bos_token_id=__A , eos_token_id=__A )
__A : Dict = hidden_size
__A : Optional[Any] = feat_extract_norm
__A : List[Any] = feat_extract_activation
__A : str = list(__A )
__A : Optional[Any] = list(__A )
__A : Optional[int] = list(__A )
__A : List[Any] = conv_bias
__A : Optional[int] = num_conv_pos_embeddings
__A : List[Any] = num_conv_pos_embedding_groups
__A : int = len(self.conv_dim )
__A : Optional[Any] = num_hidden_layers
__A : List[str] = intermediate_size
__A : Union[str, Any] = hidden_act
__A : Optional[int] = num_attention_heads
__A : Tuple = hidden_dropout
__A : Optional[Any] = attention_dropout
__A : Union[str, Any] = activation_dropout
__A : Dict = feat_proj_dropout
__A : Optional[int] = final_dropout
__A : Dict = layerdrop
__A : Optional[int] = layer_norm_eps
__A : Optional[Any] = initializer_range
__A : Optional[int] = num_ctc_classes
__A : Dict = vocab_size
__A : List[str] = do_stable_layer_norm
__A : Tuple = use_weighted_layer_sum
__A : Any = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__A : Tuple = apply_spec_augment
__A : Union[str, Any] = mask_time_prob
__A : Optional[Any] = mask_time_length
__A : List[Any] = mask_time_min_masks
__A : List[Any] = mask_feature_prob
__A : Any = mask_feature_length
__A : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__A : Any = num_codevectors_per_group
__A : Tuple = num_codevector_groups
__A : List[str] = contrastive_logits_temperature
__A : Optional[int] = feat_quantizer_dropout
__A : int = num_negatives
__A : List[str] = codevector_dim
__A : int = proj_codevector_dim
__A : Union[str, Any] = diversity_loss_weight
# ctc loss
__A : List[str] = ctc_loss_reduction
__A : Any = ctc_zero_infinity
# pretraining loss
__A : Union[str, Any] = replace_prob
@property
def lowerCAmelCase_ ( self : int ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 17 | 0 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case ( UpperCamelCase_ ):
def __init__( self : str , a_ : Optional[Any] , a_ : Optional[Any]=13 , a_ : List[Any]=7 , a_ : Optional[int]=True , a_ : Any=True , a_ : Dict=True , a_ : Tuple=True , a_ : str=True , a_ : Tuple=False , a_ : Optional[int]=False , a_ : Union[str, Any]=False , a_ : int=2 , a_ : Dict=99 , a_ : Union[str, Any]=0 , a_ : str=32 , a_ : Union[str, Any]=5 , a_ : Any=4 , a_ : int=0.1 , a_ : Any=0.1 , a_ : List[str]=512 , a_ : Optional[int]=12 , a_ : Union[str, Any]=2 , a_ : List[Any]=0.02 , a_ : Tuple=3 , a_ : int=4 , a_ : Optional[int]="last" , a_ : List[str]=None , a_ : List[Any]=None , )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = parent
SCREAMING_SNAKE_CASE__ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = seq_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE__ : str = use_input_lengths
SCREAMING_SNAKE_CASE__ : Optional[int] = use_token_type_ids
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE__ : List[str] = gelu_activation
SCREAMING_SNAKE_CASE__ : str = sinusoidal_embeddings
SCREAMING_SNAKE_CASE__ : Any = causal
SCREAMING_SNAKE_CASE__ : Optional[Any] = asm
SCREAMING_SNAKE_CASE__ : Union[str, Any] = n_langs
SCREAMING_SNAKE_CASE__ : Dict = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = n_special
SCREAMING_SNAKE_CASE__ : Any = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Dict = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : int = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE__ : str = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Dict = initializer_range
SCREAMING_SNAKE_CASE__ : Dict = num_labels
SCREAMING_SNAKE_CASE__ : List[Any] = num_choices
SCREAMING_SNAKE_CASE__ : List[Any] = summary_type
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_proj
SCREAMING_SNAKE_CASE__ : List[str] = scope
def __lowercase( self : Optional[int] )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : Dict = None
if self.use_input_lengths:
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
SCREAMING_SNAKE_CASE__ : Tuple = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : Optional[int] = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size] , 2 ).float()
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : Dict = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __lowercase( self : int )-> Any:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def __lowercase( self : List[Any] , a_ : Union[str, Any] , a_ : Any , a_ : Union[str, Any] , a_ : Any , a_ : Union[str, Any] , a_ : List[str] , a_ : List[Any] , a_ : int , a_ : Any , )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = FlaubertModel(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , lengths=a_ , langs=a_ )
SCREAMING_SNAKE_CASE__ : int = model(a_ , langs=a_ )
SCREAMING_SNAKE_CASE__ : Tuple = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase( self : str , a_ : str , a_ : int , a_ : Union[str, Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : Any , a_ : int , a_ : Tuple , a_ : str , )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = FlaubertWithLMHeadModel(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase( self : Any , a_ : Optional[Any] , a_ : Union[str, Any] , a_ : str , a_ : int , a_ : Union[str, Any] , a_ : List[Any] , a_ : Any , a_ : Any , a_ : Union[str, Any] , )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = FlaubertForQuestionAnsweringSimple(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = model(a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , start_positions=a_ , end_positions=a_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowercase( self : Tuple , a_ : Tuple , a_ : int , a_ : Optional[Any] , a_ : Optional[int] , a_ : Tuple , a_ : List[Any] , a_ : int , a_ : Optional[int] , a_ : int , )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = FlaubertForQuestionAnswering(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = model(
a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , p_mask=a_ , )
SCREAMING_SNAKE_CASE__ : List[Any] = model(
a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , )
((SCREAMING_SNAKE_CASE__) , ) : Optional[Any] = result_with_labels.to_tuple()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ , start_positions=a_ , end_positions=a_ )
((SCREAMING_SNAKE_CASE__) , ) : int = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __lowercase( self : Dict , a_ : int , a_ : Optional[int] , a_ : Tuple , a_ : Dict , a_ : Any , a_ : List[str] , a_ : str , a_ : List[Any] , a_ : int , )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = FlaubertForSequenceClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ )
SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowercase( self : List[str] , a_ : Tuple , a_ : str , a_ : Optional[Any] , a_ : List[Any] , a_ : Tuple , a_ : List[Any] , a_ : List[Any] , a_ : Optional[int] , a_ : List[str] , )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.num_labels
SCREAMING_SNAKE_CASE__ : Any = FlaubertForTokenClassification(a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowercase( self : Optional[int] , a_ : Union[str, Any] , a_ : str , a_ : List[str] , a_ : Any , a_ : Any , a_ : Any , a_ : List[Any] , a_ : Optional[Any] , a_ : Any , )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.num_choices
SCREAMING_SNAKE_CASE__ : Union[str, Any] = FlaubertForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
SCREAMING_SNAKE_CASE__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Dict = model(
a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowercase( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) : Dict = config_and_inputs
SCREAMING_SNAKE_CASE__ : Optional[int] = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'lengths': input_lengths,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowercase_ = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase_ = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def __lowercase( self : Union[str, Any] , a_ : str , a_ : List[Any] , a_ : int , a_ : str , a_ : Union[str, Any] )-> int:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __lowercase( self : Optional[Any] , a_ : List[Any] , a_ : int , a_ : List[str]=False )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
return inputs_dict
def __lowercase( self : List[str] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = FlaubertModelTester(self )
SCREAMING_SNAKE_CASE__ : Optional[int] = ConfigTester(self , config_class=a_ , emb_dim=37 )
def __lowercase( self : Dict )-> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowercase( self : Tuple )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a_ )
def __lowercase( self : Optional[Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a_ )
def __lowercase( self : Union[str, Any] )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*a_ )
def __lowercase( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a_ )
def __lowercase( self : List[Any] )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a_ )
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*a_ )
def __lowercase( self : Tuple )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*a_ )
@slow
def __lowercase( self : int )-> List[Any]:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Tuple = FlaubertModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@slow
@require_torch_gpu
def __lowercase( self : str )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
SCREAMING_SNAKE_CASE__ : Tuple = True
SCREAMING_SNAKE_CASE__ : Any = model_class(config=a_ )
SCREAMING_SNAKE_CASE__ : int = self._prepare_for_class(a_ , a_ )
SCREAMING_SNAKE_CASE__ : str = torch.jit.trace(
a_ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a_ , os.path.join(a_ , 'traced_model.pt' ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.jit.load(os.path.join(a_ , 'traced_model.pt' ) , map_location=a_ )
loaded(inputs_dict['input_ids'].to(a_ ) , inputs_dict['attention_mask'].to(a_ ) )
@require_torch
class snake_case ( unittest.TestCase ):
@slow
def __lowercase( self : Union[str, Any] )-> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased' )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ )[0]
SCREAMING_SNAKE_CASE__ : Dict = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , a_ )
SCREAMING_SNAKE_CASE__ : str = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1e-4 ) )
| 85 |
import fire
from utils import calculate_rouge, save_json
def __SCREAMING_SNAKE_CASE ( a__ : Any ,a__ : Tuple ,a__ : Any=None ,**a__ : Dict ) -> Optional[Any]:
__A : int = [x.strip() for x in open(a__ ).readlines()]
__A : List[str] = [x.strip() for x in open(a__ ).readlines()][: len(a__ )]
__A : List[Any] = calculate_rouge(a__ ,a__ ,**a__ )
if save_path is not None:
save_json(a__ ,a__ ,indent=a__ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 17 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Union[str, Any] ):
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
A_ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(UpperCAmelCase )
A_ = -1
A_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase )
A_ = model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase )
A_ = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
A_ = TextStreamer(UpperCAmelCase )
model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase , streamer=UpperCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A_ = cs.out[:-1]
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Tuple ):
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
A_ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(UpperCAmelCase )
A_ = -1
A_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase )
A_ = model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase )
A_ = tokenizer.decode(greedy_ids[0] )
A_ = TextIteratorStreamer(UpperCAmelCase )
A_ = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
A_ = Thread(target=model.generate , kwargs=UpperCAmelCase )
thread.start()
A_ = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : List[str] ):
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
A_ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(UpperCAmelCase )
A_ = -1
A_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase )
A_ = model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase )
A_ = greedy_ids[:, input_ids.shape[1] :]
A_ = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
A_ = TextStreamer(UpperCAmelCase , skip_prompt=UpperCAmelCase )
model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase , streamer=UpperCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A_ = cs.out[:-1]
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Optional[Any] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
A_ = AutoTokenizer.from_pretrained("distilgpt2" )
A_ = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(UpperCAmelCase )
A_ = -1
A_ = torch.ones((1, 5) , device=UpperCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
A_ = TextStreamer(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
model.generate(UpperCAmelCase , max_new_tokens=1 , do_sample=UpperCAmelCase , streamer=UpperCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
A_ = cs.out[:-1] # Remove the final "\n"
A_ = tokenizer(UpperCAmelCase , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __A ( self : int ):
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
A_ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(UpperCAmelCase )
A_ = -1
A_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase )
A_ = TextIteratorStreamer(UpperCAmelCase , timeout=0.001 )
A_ = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
A_ = Thread(target=model.generate , kwargs=UpperCAmelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(UpperCAmelCase ):
A_ = ""
for new_text in streamer:
streamer_text += new_text | 86 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( a__ : Optional[Any] ,a__ : Union[str, Any] ,a__ : Optional[int] ) -> List[Any]:
# Initialise PyTorch model
__A : Dict = MobileBertConfig.from_json_file(a__ )
print(f"""Building PyTorch model from configuration: {config}""" )
__A : Tuple = MobileBertForPreTraining(a__ )
# Load weights from tf checkpoint
__A : Dict = load_tf_weights_in_mobilebert(a__ ,a__ ,a__ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() ,a__ )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase_ : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 17 | 0 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
"""simple docstring"""
A__ = 0
if start < end:
A__ = randint(lowercase_ , lowercase_ )
A__ = a[end]
A__ = a[pivot]
A__ = temp
A__ , A__ = _in_place_partition(lowercase_ , lowercase_ , lowercase_ )
count += _in_place_quick_sort(lowercase_ , lowercase_ , p - 1 )
count += _in_place_quick_sort(lowercase_ , p + 1 , lowercase_ )
return count
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Any:
"""simple docstring"""
A__ = 0
A__ = randint(lowercase_ , lowercase_ )
A__ = a[end]
A__ = a[pivot]
A__ = temp
A__ = start - 1
for index in range(lowercase_ , lowercase_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
A__ = new_pivot_index + 1
A__ = a[new_pivot_index]
A__ = a[index]
A__ = temp
A__ = a[new_pivot_index + 1]
A__ = a[end]
A__ = temp
return new_pivot_index + 1, count
_lowerCamelCase : Optional[Any] = TemporaryFile()
_lowerCamelCase : str = 100 # 1000 elements are to be sorted
_lowerCamelCase , _lowerCamelCase : List[str] = 0, 1 # mean and standard deviation
_lowerCamelCase : Union[str, Any] = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
_lowerCamelCase : str = np.load(outfile)
_lowerCamelCase : List[Any] = len(M) - 1
_lowerCamelCase : List[str] = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
| 87 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : int = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 | 0 |
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase = logging.get_logger(__name__)
class lowercase__ ( A_ ):
__UpperCAmelCase = '''vision-encoder-decoder'''
__UpperCAmelCase = True
def __init__( self , **SCREAMING_SNAKE_CASE) -> Optional[Any]:
super().__init__(**SCREAMING_SNAKE_CASE)
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F'A configuraton of type {self.model_type} cannot be instantiated because '
F'not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}')
_lowerCamelCase : int = kwargs.pop("""encoder""")
_lowerCamelCase : List[Any] = encoder_config.pop("""model_type""")
_lowerCamelCase : Tuple = kwargs.pop("""decoder""")
_lowerCamelCase : Tuple = decoder_config.pop("""model_type""")
_lowerCamelCase : int = AutoConfig.for_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[Any] = AutoConfig.for_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[Any] = True
@classmethod
def UpperCamelCase_ ( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> PretrainedConfig:
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""")
_lowerCamelCase : List[Any] = True
_lowerCamelCase : Tuple = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Any:
_lowerCamelCase : Optional[Any] = copy.deepcopy(self.__dict__)
_lowerCamelCase : Any = self.encoder.to_dict()
_lowerCamelCase : Optional[int] = self.decoder.to_dict()
_lowerCamelCase : List[Any] = self.__class__.model_type
return output
class lowercase__ ( A_ ):
__UpperCAmelCase = version.parse('''1.11''' )
@property
def UpperCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
])
@property
def UpperCamelCase_ ( self) -> float:
return 1e-4
@property
def UpperCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}})
class lowercase__ ( A_ ):
@property
def UpperCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]:
_lowerCamelCase : Dict = OrderedDict()
_lowerCamelCase : Dict = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_lowerCamelCase : Union[str, Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
_lowerCamelCase : Optional[Any] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
import torch
_lowerCamelCase : Optional[Any] = OrderedDict()
_lowerCamelCase : List[Any] = super().generate_dummy_inputs(
SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE)
_lowerCamelCase , _lowerCamelCase : List[Any] = dummy_input["""input_ids"""].shape
_lowerCamelCase : List[str] = (batch, encoder_sequence, self._config.encoder_hidden_size)
_lowerCamelCase : List[str] = dummy_input.pop("""input_ids""")
_lowerCamelCase : Tuple = dummy_input.pop("""attention_mask""")
_lowerCamelCase : Dict = torch.zeros(SCREAMING_SNAKE_CASE)
return common_inputs
class lowercase__ ( A_ ):
@property
def UpperCamelCase_ ( self) -> None:
pass
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = "default") -> OnnxConfig:
_lowerCamelCase : List[Any] = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
| 88 |
import math
class lowerCamelCase_ :
def __init__( self : Union[str, Any] , __A : List[str]=0 ): # a graph with Node 0,1,...,N-1
__A : List[str] = n
__A : List[str] = [
[math.inf for j in range(0 , __A )] for i in range(0 , __A )
] # adjacency matrix for weight
__A : str = [
[math.inf for j in range(0 , __A )] for i in range(0 , __A )
] # dp[i][j] stores minimum distance from i to j
def lowerCAmelCase_ ( self : str , __A : Union[str, Any] , __A : Any , __A : Optional[int] ):
__A : List[Any] = w
def lowerCAmelCase_ ( self : Union[str, Any] ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
__A : List[Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def lowerCAmelCase_ ( self : int , __A : List[str] , __A : List[str] ):
return self.dp[u][v]
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 17 | 0 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase="resnet50", lowerCamelCase=3, lowerCamelCase=32, lowerCamelCase=3, lowerCamelCase=True, lowerCamelCase=True, ) -> Tuple:
"""simple docstring"""
_lowercase : Optional[Any] = parent
_lowercase : List[Any] = out_indices if out_indices is not None else [4]
_lowercase : Optional[int] = stage_names
_lowercase : Any = out_features
_lowercase : int = backbone
_lowercase : Optional[int] = batch_size
_lowercase : Dict = image_size
_lowercase : List[Any] = num_channels
_lowercase : Union[str, Any] = use_pretrained_backbone
_lowercase : Optional[int] = is_training
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowercase : List[str] = self.get_config()
return config, pixel_values
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = TimmBackbone(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
with torch.no_grad():
_lowercase : Optional[int] = model(lowerCamelCase)
self.parent.assertEqual(
result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14), )
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : List[str] = self.prepare_config_and_inputs()
_lowercase , _lowercase : List[Any] = config_and_inputs
_lowercase : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _lowerCamelCase( _a, _a, _a, unittest.TestCase ):
lowercase_ : Dict = (TimmBackbone,) if is_torch_available() else ()
lowercase_ : Optional[Any] = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
lowercase_ : Union[str, Any] = False
lowercase_ : Optional[Any] = False
lowercase_ : Optional[Any] = False
lowercase_ : Any = False
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Union[str, Any] = TimmBackboneModelTester(self)
_lowercase : str = ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : int = 'resnet18'
_lowercase : Any = 'microsoft/resnet-18'
_lowercase : List[str] = AutoBackbone.from_pretrained(lowerCamelCase, use_timm_backbone=lowerCamelCase)
_lowercase : int = AutoBackbone.from_pretrained(lowerCamelCase)
self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
self.assertEqual(timm_model.channels, transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices, (-1,))
self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])
_lowercase : List[Any] = AutoBackbone.from_pretrained(lowerCamelCase, use_timm_backbone=lowerCamelCase, out_indices=[1, 2, 3])
_lowercase : int = AutoBackbone.from_pretrained(lowerCamelCase, out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
self.assertEqual(timm_model.channels, transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def UpperCamelCase ( self) -> int:
"""simple docstring"""
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def UpperCamelCase ( self) -> int:
"""simple docstring"""
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def UpperCamelCase ( self) -> str:
"""simple docstring"""
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def UpperCamelCase ( self) -> str:
"""simple docstring"""
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('Safetensors is not supported by timm.')
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase , _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Union[str, Any] = model_class(lowerCamelCase)
_lowercase : Dict = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : List[Any] = [*signature.parameters.keys()]
_lowercase : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase)
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase , _lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : str = True
_lowercase : List[Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
_lowercase : str = self.all_model_classes[0]
_lowercase : Optional[Any] = model_class(lowerCamelCase)
model.to(lowerCamelCase)
_lowercase : Union[str, Any] = self._prepare_for_class(lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = model(**lowerCamelCase)
_lowercase : Optional[Any] = outputs[0][-1]
# Encoder-/Decoder-only models
_lowercase : str = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_lowercase : List[Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowerCamelCase)
self.assertIsNotNone(hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(attentions.grad)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase , _lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Union[str, Any] = model_class(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : int = model(**lowerCamelCase)
self.assertEqual(len(result.feature_maps), len(config.out_indices))
self.assertEqual(len(model.channels), len(config.out_indices))
# Check output of last stage is taken if out_features=None, out_indices=None
_lowercase : Dict = copy.deepcopy(lowerCamelCase)
_lowercase : Optional[Any] = None
_lowercase : List[str] = model_class(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : List[str] = model(**lowerCamelCase)
self.assertEqual(len(result.feature_maps), 1)
self.assertEqual(len(model.channels), 1)
# Check backbone can be initialized with fresh weights
_lowercase : Dict = copy.deepcopy(lowerCamelCase)
_lowercase : List[str] = False
_lowercase : List[Any] = model_class(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Dict = model(**lowerCamelCase)
| 89 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : complex ,a__ : str = "x" ,a__ : float = 10**-10 ,a__ : int = 1 ,) -> complex:
__A : Tuple = symbols(a__ )
__A : List[str] = lambdify(a__ ,a__ )
__A : Any = lambdify(a__ ,diff(a__ ,a__ ) )
__A : Dict = starting_point
while True:
if diff_function(a__ ) != 0:
__A : Optional[int] = prev_guess - multiplicity * func(a__ ) / diff_function(
a__ )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
__A : List[Any] = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 17 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class a__ ( a__ ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase_ , '''width_multiplier''' ) )
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=64 , lowerCamelCase_=2 , lowerCamelCase_=3 , lowerCamelCase_="swish" , lowerCamelCase_=3 , lowerCamelCase_=32 , lowerCamelCase_=0.1 , lowerCamelCase_=0.02 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=10 , lowerCamelCase_=None , lowerCamelCase_=0.25 , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , ) -> Tuple:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = make_divisible(5_12 * width_multiplier , divisor=8 )
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = conv_kernel_size
lowerCAmelCase__ = output_stride
lowerCAmelCase__ = classifier_dropout_prob
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = is_training
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = scope
lowerCAmelCase__ = width_multiplier
lowerCAmelCase__ = ffn_dropout
lowerCAmelCase__ = attn_dropout
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels, pixel_labels
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
lowerCAmelCase__ = MobileViTVaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = MobileViTVaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = MobileViTVaForSemanticSegmentation(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCAmelCase__ = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a__ ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Tuple = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase__ : int = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ : Dict = False
lowercase__ : Optional[int] = False
lowercase__ : int = False
lowercase__ : List[str] = False
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = MobileViTVaModelTester(self )
lowerCAmelCase__ = MobileViTVaConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(lowerCamelCase_ )
lowerCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
def check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
lowerCAmelCase__ = outputs.hidden_states
lowerCAmelCase__ = 5
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
lowerCAmelCase__ = 2
for i in range(len(lowerCamelCase_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase_ )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = MobileViTVaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def _snake_case ( ) -> int:
lowerCAmelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
lowerCamelCase_ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**lowerCamelCase_ )
# verify the logits
lowerCAmelCase__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
lowerCAmelCase__ = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
lowerCAmelCase__ = model.to(lowerCamelCase_ )
lowerCAmelCase__ = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**lowerCamelCase_ )
lowerCAmelCase__ = outputs.logits
# verify the logits
lowerCAmelCase__ = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , lowerCamelCase_ )
lowerCAmelCase__ = torch.tensor(
[
[[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
[[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
[[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
] , device=lowerCamelCase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase_ , atol=1e-4 ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
lowerCAmelCase__ = model.to(lowerCamelCase_ )
lowerCAmelCase__ = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**lowerCamelCase_ )
lowerCAmelCase__ = outputs.logits.detach().cpu()
lowerCAmelCase__ = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase_ , target_sizes=[(50, 60)] )
lowerCAmelCase__ = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , lowerCamelCase_ )
lowerCAmelCase__ = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase_ )
lowerCAmelCase__ = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , lowerCamelCase_ ) | 90 |
from math import sqrt
def __SCREAMING_SNAKE_CASE ( a__ : int = 1000000 ) -> int:
__A : int = 0
__A : int = 0
__A : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(a__ ,sum_shortest_sides // 2 )
- max(1 ,sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
| 17 | 0 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
A = 'ylacombe/bark-small'
A = tempfile.mkdtemp()
A = 'en_speaker_1'
A = 'This is a test string'
A = 'speaker_embeddings_path.json'
A = 'speaker_embeddings'
def _SCREAMING_SNAKE_CASE ( self : Any ,**A_ : Any ) -> List[Any]:
return AutoTokenizer.from_pretrained(self.checkpoint ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = self.get_tokenizer()
A = BarkProcessor(tokenizer=A_ )
processor.save_pretrained(self.tmpdirname )
A = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
processor.save_pretrained(
self.tmpdirname ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,speaker_embeddings_directory=self.speaker_embeddings_directory ,)
A = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' )
A = BarkProcessor.from_pretrained(
self.tmpdirname ,self.speaker_embeddings_dict_path ,bos_token='(BOS)' ,eos_token='(EOS)' ,)
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
def _SCREAMING_SNAKE_CASE ( self : str ) -> str:
A = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint ,speaker_embeddings_dict_path=self.speaker_embeddings_dict_path ,)
A = 35
A = 2
A = 8
A = {
'semantic_prompt': np.ones(A_ ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
A = processor(text=self.input_string ,voice_preset=A_ )
A = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(A_ ,np.array([] ) ).tolist() )
# test loading voice preset from npz file
A = os.path.join(self.tmpdirname ,'file.npz' )
np.savez(A_ ,**A_ )
A = processor(text=self.input_string ,voice_preset=A_ )
A = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() ,processed_voice_preset.get(A_ ,np.array([] ) ).tolist() )
# test loading voice preset from the hub
A = processor(text=self.input_string ,voice_preset=self.voice_preset )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = self.get_tokenizer()
A = BarkProcessor(tokenizer=A_ )
A = processor(text=self.input_string )
A = tokenizer(
self.input_string ,padding='max_length' ,max_length=256 ,add_special_tokens=A_ ,return_attention_mask=A_ ,return_token_type_ids=A_ ,)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key].squeeze().tolist() ) | 91 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase_ ( _lowercase ):
_lowercase : List[str] = ['''pixel_values''']
def __init__( self : Dict , __A : bool = True , __A : Optional[Dict[str, int]] = None , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : bool = True , __A : Dict[str, int] = None , __A : bool = True , __A : Union[int, float] = 1 / 255 , __A : bool = True , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , **__A : int , ):
super().__init__(**__A )
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 256}
__A : Dict = get_size_dict(__A , default_to_square=__A )
__A : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : str = do_resize
__A : Dict = size
__A : Any = resample
__A : Optional[Any] = do_center_crop
__A : List[str] = crop_size
__A : Optional[int] = do_rescale
__A : int = rescale_factor
__A : Union[str, Any] = do_normalize
__A : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__A : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase_ ( self : Optional[Any] , __A : np.ndarray , __A : Dict[str, int] , __A : PILImageResampling = PILImageResampling.BICUBIC , __A : Optional[Union[str, ChannelDimension]] = None , **__A : List[Any] , ):
__A : str = get_size_dict(__A , default_to_square=__A )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__A : Dict = get_resize_output_image_size(__A , size=size["""shortest_edge"""] , default_to_square=__A )
return resize(__A , size=__A , resample=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : np.ndarray , __A : Dict[str, int] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : str , ):
__A : str = get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__A , size=(size["""height"""], size["""width"""]) , data_format=__A , **__A )
def lowerCAmelCase_ ( self : List[str] , __A : np.ndarray , __A : float , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Optional[int] ):
return rescale(__A , scale=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : Any , __A : np.ndarray , __A : Union[float, List[float]] , __A : Union[float, List[float]] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Tuple , ):
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def lowerCAmelCase_ ( self : int , __A : ImageInput , __A : Optional[bool] = None , __A : Dict[str, int] = None , __A : PILImageResampling = None , __A : bool = None , __A : Dict[str, int] = None , __A : Optional[bool] = None , __A : Optional[float] = None , __A : Optional[bool] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[str, TensorType]] = None , __A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__A : Optional[int] , ):
__A : List[str] = do_resize if do_resize is not None else self.do_resize
__A : Any = size if size is not None else self.size
__A : Union[str, Any] = get_size_dict(__A , default_to_square=__A )
__A : Tuple = resample if resample is not None else self.resample
__A : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
__A : List[Any] = crop_size if crop_size is not None else self.crop_size
__A : int = get_size_dict(__A , param_name="""crop_size""" )
__A : Tuple = do_rescale if do_rescale is not None else self.do_rescale
__A : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__A : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
__A : Optional[int] = image_mean if image_mean is not None else self.image_mean
__A : List[str] = image_std if image_std is not None else self.image_std
__A : Union[str, Any] = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__A : Union[str, Any] = [to_numpy_array(__A ) for image in images]
if do_resize:
__A : int = [self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_center_crop:
__A : Optional[Any] = [self.center_crop(image=__A , size=__A ) for image in images]
if do_rescale:
__A : List[Any] = [self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
__A : Any = [self.normalize(image=__A , mean=__A , std=__A ) for image in images]
__A : int = [to_channel_dimension_format(__A , __A ) for image in images]
__A : Tuple = {"""pixel_values""": images}
return BatchFeature(data=__A , tensor_type=__A )
def lowerCAmelCase_ ( self : int , __A : List[str] , __A : List[Tuple] = None ):
__A : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__A ) != len(__A ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(__A ):
__A : str = target_sizes.numpy()
__A : int = []
for idx in range(len(__A ) ):
__A : Any = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__A )
__A : Union[str, Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__A )
else:
__A : List[str] = logits.argmax(dim=1 )
__A : Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 17 | 0 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int ) -> str:
lowercase : Tuple =int(__magic_name__ )
if decimal in (0, 1): # Exit cases for the recursion
return str(__magic_name__ )
lowercase , lowercase : Optional[Any] =divmod(__magic_name__ , 2 )
return binary_recursive(__magic_name__ ) + str(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : str ) -> str:
lowercase : List[Any] =str(__magic_name__ ).strip()
if not number:
raise ValueError('''No input value was provided''' )
lowercase : str ='''-''' if number.startswith('''-''' ) else ''''''
lowercase : List[str] =number.lstrip('''-''' )
if not number.isnumeric():
raise ValueError('''Input value is not an integer''' )
return f'''{negative}0b{binary_recursive(int(__magic_name__ ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
| 92 |
class lowerCamelCase_ :
def __init__( self : Dict , __A : Tuple , __A : Optional[int] , __A : int ):
__A : List[str] = name
__A : Optional[int] = value
__A : Optional[Any] = weight
def __repr__( self : Any ):
return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def lowerCAmelCase_ ( self : Union[str, Any] ):
return self.value
def lowerCAmelCase_ ( self : str ):
return self.name
def lowerCAmelCase_ ( self : str ):
return self.weight
def lowerCAmelCase_ ( self : Dict ):
return self.value / self.weight
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : Optional[int] ,a__ : Union[str, Any] ) -> int:
__A : Tuple = []
for i in range(len(a__ ) ):
menu.append(Things(name[i] ,value[i] ,weight[i] ) )
return menu
def __SCREAMING_SNAKE_CASE ( a__ : Tuple ,a__ : Any ,a__ : Optional[int] ) -> Tuple:
__A : Optional[int] = sorted(a__ ,key=a__ ,reverse=a__ )
__A : Optional[Any] = []
__A , __A : Tuple = 0.0, 0.0
for i in range(len(a__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
"""simple docstring"""
lowerCAmelCase__ :Optional[Any] = multiprocessing.Manager()
lowerCAmelCase__ :Optional[int] = manager.list()
lowerCAmelCase__ :str = multiprocessing.Process(target=_SCREAMING_SNAKE_CASE , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('timed out' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Any:
"""simple docstring"""
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
lowerCAmelCase__ :Any = shutil.rmtree
lowerCAmelCase__ :Dict = os.rmdir
lowerCAmelCase__ :int = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
lowerCAmelCase__ :Optional[Any] = {}
with swallow_io():
with time_limit(_SCREAMING_SNAKE_CASE ):
exec(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
result.append('passed' )
except TimeoutException:
result.append('timed out' )
except BaseException as e:
result.append(F"failed: {e}" )
# Needed for cleaning up.
lowerCAmelCase__ :Optional[Any] = rmtree
lowerCAmelCase__ :Union[str, Any] = rmdir
lowerCAmelCase__ :Optional[int] = chdir
@contextlib.contextmanager
def __A (_SCREAMING_SNAKE_CASE ) ->List[str]:
"""simple docstring"""
def signal_handler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TimeoutException('Timed out!' )
signal.setitimer(signal.ITIMER_REAL , _SCREAMING_SNAKE_CASE )
signal.signal(signal.SIGALRM , _SCREAMING_SNAKE_CASE )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def __A () ->int:
"""simple docstring"""
lowerCAmelCase__ :Optional[Any] = WriteOnlyStringIO()
with contextlib.redirect_stdout(_SCREAMING_SNAKE_CASE ):
with contextlib.redirect_stderr(_SCREAMING_SNAKE_CASE ):
with redirect_stdin(_SCREAMING_SNAKE_CASE ):
yield
@contextlib.contextmanager
def __A () ->List[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as dirname:
with chdir(_SCREAMING_SNAKE_CASE ):
yield dirname
class _lowerCAmelCase ( a ):
"""simple docstring"""
pass
class _lowerCAmelCase ( io.StringIO ):
"""simple docstring"""
def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
raise OSError
def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
raise OSError
def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
raise OSError
def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return False
class _lowerCAmelCase ( contextlib._RedirectStream ): # type: ignore
"""simple docstring"""
__magic_name__ :List[Any] = """stdin"""
@contextlib.contextmanager
def __A (_SCREAMING_SNAKE_CASE ) ->List[Any]:
"""simple docstring"""
if root == ".":
yield
return
lowerCAmelCase__ :Dict = os.getcwd()
os.chdir(_SCREAMING_SNAKE_CASE )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(_SCREAMING_SNAKE_CASE )
def __A (_SCREAMING_SNAKE_CASE=None ) ->int:
"""simple docstring"""
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
lowerCAmelCase__ :Union[str, Any] = None
lowerCAmelCase__ :List[Any] = None
import os
lowerCAmelCase__ :Optional[Any] = '1'
lowerCAmelCase__ :Optional[Any] = None
lowerCAmelCase__ :List[Any] = None
lowerCAmelCase__ :Optional[Any] = None
lowerCAmelCase__ :Optional[int] = None
lowerCAmelCase__ :int = None
lowerCAmelCase__ :Optional[int] = None
lowerCAmelCase__ :Dict = None
lowerCAmelCase__ :Dict = None
lowerCAmelCase__ :Dict = None
lowerCAmelCase__ :List[Any] = None
lowerCAmelCase__ :Any = None
lowerCAmelCase__ :Any = None
lowerCAmelCase__ :Tuple = None
lowerCAmelCase__ :str = None
lowerCAmelCase__ :str = None
lowerCAmelCase__ :List[str] = None
lowerCAmelCase__ :Optional[int] = None
lowerCAmelCase__ :int = None
lowerCAmelCase__ :Dict = None
lowerCAmelCase__ :Dict = None
lowerCAmelCase__ :Dict = None
lowerCAmelCase__ :Optional[int] = None
lowerCAmelCase__ :Union[str, Any] = None
lowerCAmelCase__ :Dict = None
lowerCAmelCase__ :Tuple = None
lowerCAmelCase__ :Tuple = None
lowerCAmelCase__ :Optional[int] = None
import shutil
lowerCAmelCase__ :Dict = None
lowerCAmelCase__ :str = None
lowerCAmelCase__ :str = None
import subprocess
lowerCAmelCase__ :List[str] = None # type: ignore
lowerCAmelCase__ :int = None
import sys
lowerCAmelCase__ :Tuple = None
lowerCAmelCase__ :Tuple = None
lowerCAmelCase__ :Union[str, Any] = None
lowerCAmelCase__ :List[str] = None
lowerCAmelCase__ :List[str] = None
| 93 |
UpperCAmelCase_ : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_5818,
}
def __SCREAMING_SNAKE_CASE ( a__ : str ,a__ : str ,a__ : float ) -> float:
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
__A : Optional[int] = (
f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
f"""Valid values are: {", ".join(a__ )}"""
)
raise ValueError(a__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 94 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ : Optional[Any] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 17 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase_ (__A ):
__magic_name__ = ['''image_processor''', '''tokenizer''']
__magic_name__ = '''FlavaImageProcessor'''
__magic_name__ = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : Optional[int] , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : List[str]=None , **lowerCAmelCase_ : Union[str, Any] ) -> List[Any]:
UpperCAmelCase_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCAmelCase_ , )
UpperCAmelCase_ : Tuple = kwargs.pop("feature_extractor" )
UpperCAmelCase_ : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = self.image_processor
def __call__( self : List[Any] , lowerCAmelCase_ : Optional[ImageInput] = None , lowerCAmelCase_ : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[bool, str, PaddingStrategy] = False , lowerCAmelCase_ : Union[bool, str, TruncationStrategy] = False , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , **lowerCAmelCase_ : Optional[int] , ) -> List[Any]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
UpperCAmelCase_ : Union[str, Any] = self.tokenizer(
text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
if images is not None:
UpperCAmelCase_ : Optional[Any] = self.image_processor(
lowerCAmelCase_ , return_image_mask=lowerCAmelCase_ , return_codebook_pixels=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
if text is not None and images is not None:
encoding.update(lowerCAmelCase_ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase_ ) , tensor_type=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *lowerCAmelCase_ : int , **lowerCAmelCase_ : List[str] ) -> Any:
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : Optional[int] ) -> List[Any]:
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
UpperCAmelCase_ : List[Any] = self.tokenizer.model_input_names
UpperCAmelCase_ : Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowerCAmelCase_ , )
return self.image_processor_class
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowerCAmelCase_ , )
return self.image_processor
| 95 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
UpperCAmelCase_ : Optional[Any] = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def __SCREAMING_SNAKE_CASE ( a__ : str=True ) -> List[Any]:
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_lowercase ) )
class lowerCamelCase_ ( _lowercase ):
_lowercase : Optional[int] = None
_lowercase : str = None
def lowerCAmelCase_ ( self : Dict , __A : Optional[int] , __A : Optional[Any] ):
with TemporaryDirectory() as tmp_dir:
__A : List[Any] = dataset_module_factory(__A , cache_dir=__A )
__A : Tuple = import_main_class(dataset_module.module_path , dataset=__A )
__A : DatasetBuilder = builder_cls(
cache_dir=__A , config_name=__A , hash=dataset_module.hash , )
__A : List[Any] = """/""".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=__A ).replace(os.sep , """/""" ),
config.DATASET_INFO_FILENAME,
] )
__A : Union[str, Any] = cached_path(__A , cache_dir=__A )
self.assertTrue(os.path.exists(__A ) )
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( a__ : Dict ) -> Optional[Any]:
__A : Optional[Any] = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
__A : Union[str, Any] = dataset_module_factory("""wikipedia""" ,cache_dir=a__ )
__A : List[Any] = import_main_class(dataset_module.module_path )
__A : DatasetBuilder = builder_cls(
cache_dir=a__ ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
__A : Any = None
builder_instance.download_and_prepare()
__A : Union[str, Any] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( a__ : List[str] ) -> List[str]:
__A : Tuple = dataset_module_factory("""wikipedia""" ,cache_dir=a__ )
__A : str = import_main_class(dataset_module.module_path ,dataset=a__ )
__A : DatasetBuilder = builder_cls(
cache_dir=a__ ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
__A : Optional[int] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(a__ ,a__ )
assert "train" in ds
assert isinstance(ds["""train"""] ,a__ )
assert next(iter(ds["""train"""] ) )
| 17 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __A :
def __init__( self : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Optional[int]=1_3 , __snake_case : Tuple=7 , __snake_case : Optional[Any]=True , __snake_case : List[str]=True , __snake_case : List[Any]=True , __snake_case : List[str]=True , __snake_case : Dict=9_9 , __snake_case : Dict=3_2 , __snake_case : Optional[int]=2 , __snake_case : List[Any]=4 , __snake_case : List[str]=3_7 , __snake_case : str="gelu" , __snake_case : List[str]=0.1 , __snake_case : Tuple=0.1 , __snake_case : str=5_1_2 , __snake_case : Optional[int]=1_6 , __snake_case : List[str]=2 , __snake_case : str=0.02 , __snake_case : Optional[int]=3 , __snake_case : List[str]=4 , __snake_case : Any=None , __snake_case : int=0 , ) -> int:
__magic_name__: int = parent
__magic_name__: Tuple = batch_size
__magic_name__: List[Any] = seq_length
__magic_name__: str = is_training
__magic_name__: Union[str, Any] = use_input_mask
__magic_name__: Any = use_token_type_ids
__magic_name__: Tuple = use_labels
__magic_name__: Tuple = vocab_size
__magic_name__: Tuple = hidden_size
__magic_name__: str = num_hidden_layers
__magic_name__: List[Any] = num_attention_heads
__magic_name__: Any = intermediate_size
__magic_name__: List[Any] = hidden_act
__magic_name__: Optional[int] = hidden_dropout_prob
__magic_name__: Dict = attention_probs_dropout_prob
__magic_name__: Union[str, Any] = max_position_embeddings
__magic_name__: Tuple = type_vocab_size
__magic_name__: List[str] = type_sequence_label_size
__magic_name__: Union[str, Any] = initializer_range
__magic_name__: List[str] = num_labels
__magic_name__: List[Any] = num_choices
__magic_name__: str = scope
__magic_name__: Dict = projection_dim
def lowerCamelCase__ ( self : List[Any] ) -> int:
__magic_name__: str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__: List[str] = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
__magic_name__: Dict = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__: Dict = None
if self.use_token_type_ids:
__magic_name__: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__: str = None
__magic_name__: Tuple = None
__magic_name__: List[str] = None
if self.use_labels:
__magic_name__: Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__: Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__: Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__: List[str] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
__magic_name__: Any = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : str , __snake_case : Any , __snake_case : Optional[Any] , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Optional[Any]:
__magic_name__: int = TFDPRContextEncoder(config=__snake_case )
__magic_name__: Tuple = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
__magic_name__: Optional[Any] = model(__snake_case , token_type_ids=__snake_case )
__magic_name__: Tuple = model(__snake_case )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowerCamelCase__ ( self : List[str] , __snake_case : int , __snake_case : List[str] , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : List[Any] ) -> int:
__magic_name__: Any = TFDPRQuestionEncoder(config=__snake_case )
__magic_name__: Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
__magic_name__: List[Any] = model(__snake_case , token_type_ids=__snake_case )
__magic_name__: int = model(__snake_case )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def lowerCamelCase__ ( self : Optional[int] , __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : str , __snake_case : str , __snake_case : List[str] , __snake_case : Any , __snake_case : int ) -> List[Any]:
__magic_name__: List[str] = TFDPRReader(config=__snake_case )
__magic_name__: Dict = model(__snake_case , attention_mask=__snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def lowerCamelCase__ ( self : Dict ) -> Tuple:
__magic_name__: int = self.prepare_config_and_inputs()
(
(
__magic_name__
), (
__magic_name__
), (
__magic_name__
), (
__magic_name__
), (
__magic_name__
), (
__magic_name__
), (
__magic_name__
),
): Optional[int] = config_and_inputs
__magic_name__: List[str] = {"""input_ids""": input_ids}
return config, inputs_dict
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
UpperCAmelCase__ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def lowerCamelCase__ ( self : Any ) -> List[Any]:
__magic_name__: List[Any] = TFDPRModelTester(self )
__magic_name__: Optional[Any] = ConfigTester(self , config_class=__snake_case , hidden_size=3_7 )
def lowerCamelCase__ ( self : List[str] ) -> str:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : List[str] ) -> str:
__magic_name__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__snake_case )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
__magic_name__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__snake_case )
def lowerCamelCase__ ( self : Dict ) -> List[str]:
__magic_name__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__snake_case )
@slow
def lowerCamelCase__ ( self : Optional[int] ) -> int:
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__: Tuple = TFDPRContextEncoder.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__: str = TFDPRContextEncoder.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__: Optional[Any] = TFDPRQuestionEncoder.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__: Optional[int] = TFDPRReader.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_tf
class __A ( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
__magic_name__: int = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""" )
__magic_name__: Optional[Any] = tf.constant(
[[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP]
__magic_name__: Optional[Any] = model(__snake_case )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
__magic_name__: List[Any] = tf.constant(
[
[
0.03236253,
0.12753335,
0.16818509,
0.00279786,
0.3896933,
0.24264945,
0.2178971,
-0.02335227,
-0.08481959,
-0.14324117,
]
] )
self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 96 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowerCamelCase_ ( unittest.TestCase ):
def __init__( self : Optional[int] , __A : Union[str, Any] , __A : int=7 , __A : int=3 , __A : int=30 , __A : Dict=400 , __A : str=True , __A : str=None , __A : str=True , __A : Optional[int]=[0.5, 0.5, 0.5] , __A : List[str]=[0.5, 0.5, 0.5] , __A : Optional[Any]=True , __A : int=1 / 255 , __A : List[Any]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__A : Union[str, Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
__A : Union[str, Any] = parent
__A : Union[str, Any] = batch_size
__A : Union[str, Any] = num_channels
__A : Optional[Any] = min_resolution
__A : Union[str, Any] = max_resolution
__A : Any = do_resize
__A : Union[str, Any] = size
__A : Optional[int] = do_normalize
__A : Dict = image_mean
__A : Optional[int] = image_std
__A : Tuple = do_rescale
__A : Optional[Any] = rescale_factor
__A : Tuple = do_pad
def lowerCAmelCase_ ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase_ ( self : Optional[Any] , __A : Optional[int] , __A : Dict=False ):
if not batched:
__A : Union[str, Any] = image_inputs[0]
if isinstance(__A , Image.Image ):
__A , __A : Union[str, Any] = image.size
else:
__A , __A : Optional[int] = image.shape[1], image.shape[2]
if w < h:
__A : Optional[int] = int(self.size["""shortest_edge"""] * h / w )
__A : Dict = self.size["""shortest_edge"""]
elif w > h:
__A : Optional[Any] = self.size["""shortest_edge"""]
__A : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
__A : Union[str, Any] = self.size["""shortest_edge"""]
__A : str = self.size["""shortest_edge"""]
else:
__A : Any = []
for image in image_inputs:
__A , __A : List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__A : Tuple = max(__A , key=lambda __A : item[0] )[0]
__A : Union[str, Any] = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase_ ( _lowercase , unittest.TestCase ):
_lowercase : Tuple = DetaImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : Optional[Any] ):
__A : Tuple = DetaImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : List[str] ):
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , """image_mean""" ) )
self.assertTrue(hasattr(__A , """image_std""" ) )
self.assertTrue(hasattr(__A , """do_normalize""" ) )
self.assertTrue(hasattr(__A , """do_resize""" ) )
self.assertTrue(hasattr(__A , """do_rescale""" ) )
self.assertTrue(hasattr(__A , """do_pad""" ) )
self.assertTrue(hasattr(__A , """size""" ) )
def lowerCAmelCase_ ( self : Any ):
__A : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , __A )
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A , __A : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
__A : List[str] = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : int = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : Dict ):
# Initialize image_processing
__A : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
__A : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Tuple = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Any = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase_ ( self : Tuple ):
# prepare image and target
__A : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__A : Any = json.loads(f.read() )
__A : int = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__A : List[str] = DetaImageProcessor()
__A : List[str] = image_processing(images=__A , annotations=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Tuple = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : List[str] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify orig_size
__A : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
# prepare image, target and masks_path
__A : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__A : Tuple = json.loads(f.read() )
__A : Optional[int] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__A : Any = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__A : Any = DetaImageProcessor(format="""coco_panoptic""" )
__A : int = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="""pt""" )
# verify pixel values
__A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __A )
__A : Optional[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
__A : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __A ) )
# verify boxes
__A : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __A )
__A : Union[str, Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __A , atol=1e-3 ) )
# verify image_id
__A : Any = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __A ) )
# verify is_crowd
__A : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __A ) )
# verify class_labels
__A : Optional[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __A ) )
# verify masks
__A : List[str] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __A )
# verify orig_size
__A : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __A ) )
# verify size
__A : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __A ) )
| 17 | 0 |
__a = [
(1_0_0_0, 'M'),
(9_0_0, 'CM'),
(5_0_0, 'D'),
(4_0_0, 'CD'),
(1_0_0, 'C'),
(9_0, 'XC'),
(5_0, 'L'),
(4_0, 'XL'),
(1_0, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def a ( snake_case__: str ):
'''simple docstring'''
lowercase_ = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1_000}
lowercase_ = 0
lowercase_ = 0
while place < len(snake_case__ ):
if (place + 1 < len(snake_case__ )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def a ( snake_case__: int ):
'''simple docstring'''
lowercase_ = []
for arabic, roman in ROMAN:
((lowercase_) , (lowercase_)) = divmod(snake_case__ , snake_case__ )
result.append(roman * factor )
if number == 0:
break
return "".join(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 97 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
__A : List[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=a__ ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=a__ ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=a__ )
return parser.parse_args()
def __SCREAMING_SNAKE_CASE ( ) -> str:
__A : Union[str, Any] = parse_args()
# Import training_script as a module.
__A : List[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__A : str = script_fpath.stem
__A : int = importlib.import_module(a__ )
# Patch sys.argv
__A : List[str] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 17 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def a__ ( lowercase : Dict, lowercase : str, lowercase : str, lowercase : Path, lowercase : str = None, lowercase : str = None, lowercase : str = None, ) -> str:
"""simple docstring"""
if config_name_or_path is None:
_UpperCamelCase = '''facebook/rag-token-base''' if model_type == '''rag_token''' else '''facebook/rag-sequence-base'''
if generator_tokenizer_name_or_path is None:
_UpperCamelCase = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
_UpperCamelCase = question_encoder_name_or_path
_UpperCamelCase = RagTokenForGeneration if model_type == '''rag_token''' else RagSequenceForGeneration
# Save model.
_UpperCamelCase = RagConfig.from_pretrained(lowercase )
_UpperCamelCase = AutoConfig.from_pretrained(lowercase )
_UpperCamelCase = AutoConfig.from_pretrained(lowercase )
_UpperCamelCase = gen_config
_UpperCamelCase = question_encoder_config
_UpperCamelCase = model_class.from_pretrained_question_encoder_generator(
lowercase, lowercase, config=lowercase )
rag_model.save_pretrained(lowercase )
# Sanity check.
model_class.from_pretrained(lowercase )
# Save tokenizers.
_UpperCamelCase = AutoTokenizer.from_pretrained(lowercase )
gen_tokenizer.save_pretrained(dest_dir / '''generator_tokenizer/''' )
_UpperCamelCase = AutoTokenizer.from_pretrained(lowercase )
question_encoder_tokenizer.save_pretrained(dest_dir / '''question_encoder_tokenizer/''' )
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
lowercase__ : Optional[int] = parser.parse_args()
lowercase__ : Union[str, Any] = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 98 |
from collections.abc import Sequence
def __SCREAMING_SNAKE_CASE ( a__ : Sequence[float] ,a__ : float ) -> float:
return sum(c * (x**i) for i, c in enumerate(a__ ) )
def __SCREAMING_SNAKE_CASE ( a__ : Sequence[float] ,a__ : float ) -> float:
__A : Any = 0.0
for coeff in reversed(a__ ):
__A : List[str] = result * x + coeff
return result
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCAmelCase_ : str = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 17 | 0 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ):
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
__a = nn.Parameter(lowerCAmelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
__a = nn.Parameter(lowerCAmelCase__ )
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
# set torch weights for 1-to-1 comparison
__a = np.asarray(weights[0] )
__a = np.asarray(weights[1] )
__a = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(lowerCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCAmelCase__ ).view(-1 , lowerCAmelCase__ ).contiguous().transpose(0 , 1 ) , )
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
# set torch weights for 1-to-1 comparison
__a = np.asarray(weights[0] )
__a = np.asarray(weights[1] )
__a = np.asarray(weights[2] )
__a = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(lowerCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(lowerCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCAmelCase__ ).view(-1 , lowerCAmelCase__ ).contiguous().transpose(0 , 1 ) , )
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
# layernorm 1
__a = weights[0][0][0]
__a = np.asarray(layer_norm_a[0] )
__a = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(lowerCAmelCase__ ) , torch.tensor(lowerCAmelCase__ ) , )
# lsh weights + output
__a = weights[0][1]
if len(lowerCAmelCase__ ) < 4:
set_layer_weights_in_torch_lsh(lowerCAmelCase__ , torch_block.attention , lowerCAmelCase__ )
else:
set_layer_weights_in_torch_local(lowerCAmelCase__ , torch_block.attention , lowerCAmelCase__ )
# intermediate weighs
__a = weights[2][0][1][2]
# Chunked Feed Forward
if len(lowerCAmelCase__ ) == 4:
__a = intermediate_weights[2]
# layernorm 2
__a = np.asarray(intermediate_weights[0][0] )
__a = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(lowerCAmelCase__ ) , torch.tensor(lowerCAmelCase__ ) , )
# intermediate dense
__a = np.asarray(intermediate_weights[1][0] )
__a = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(lowerCAmelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCAmelCase__ ) , )
# intermediate out
__a = np.asarray(intermediate_weights[4][0] )
__a = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(lowerCAmelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCAmelCase__ ) , )
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
# reformer model
__a = torch_model.reformer
# word embeds
__a = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCAmelCase__ ) , )
if isinstance(weights[3] , lowerCAmelCase__ ):
__a = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
__a = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings[emb_idx]} emb does not match'''
__a = nn.Parameter(torch.tensor(lowerCAmelCase__ ) )
__a = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
lowerCAmelCase__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
__a = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# output layer norm
__a = np.asarray(weights[7][0] )
__a = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCAmelCase__ ) , torch.tensor(lowerCAmelCase__ ) , )
# output embeddings
__a = np.asarray(weights[9][0] )
__a = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(lowerCAmelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCAmelCase__ ) , )
def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
# Initialise PyTorch model
__a = ReformerConfig.from_json_file(lowerCAmelCase__ )
print(f'''Building PyTorch model from configuration: {config}''' )
__a = ReformerModelWithLMHead(lowerCAmelCase__ )
with open(lowerCAmelCase__ , """rb""" ) as f:
__a = pickle.load(lowerCAmelCase__ )["""weights"""]
set_model_weights_in_torch(lowerCAmelCase__ , lowerCAmelCase__ , config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , lowerCAmelCase__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 99 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCamelCase_ ( _lowercase ):
_lowercase : Union[str, Any] = '''EncodecFeatureExtractor'''
_lowercase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self : List[Any] , __A : Any , __A : Tuple ):
super().__init__(__A , __A )
__A : Dict = self.feature_extractor
__A : List[str] = False
def lowerCAmelCase_ ( self : Union[str, Any] , __A : str=None , __A : Tuple=None , __A : Dict=True ):
return self.tokenizer.get_decoder_prompt_ids(task=__A , language=__A , no_timestamps=__A )
def __call__( self : Optional[Any] , *__A : Tuple , **__A : Tuple ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__A , **__A )
__A : str = kwargs.pop("""audio""" , __A )
__A : Optional[Any] = kwargs.pop("""sampling_rate""" , __A )
__A : int = kwargs.pop("""text""" , __A )
if len(__A ) > 0:
__A : int = args[0]
__A : Dict = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if text is not None:
__A : Dict = self.tokenizer(__A , **__A )
if audio is not None:
__A : Optional[int] = self.feature_extractor(__A , *__A , sampling_rate=__A , **__A )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__A : List[Any] = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
__A : int = audio_inputs["""padding_mask"""]
return inputs
def lowerCAmelCase_ ( self : List[str] , *__A : int , **__A : Tuple ):
__A : Optional[int] = kwargs.pop("""audio""" , __A )
__A : List[str] = kwargs.pop("""padding_mask""" , __A )
if len(__A ) > 0:
__A : Dict = args[0]
__A : Optional[int] = args[1:]
if audio_values is not None:
return self._decode_audio(__A , padding_mask=__A )
else:
return self.tokenizer.batch_decode(*__A , **__A )
def lowerCAmelCase_ ( self : Optional[Any] , *__A : Dict , **__A : Any ):
return self.tokenizer.decode(*__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : Union[str, Any] , __A : Optional = None ):
__A : List[str] = to_numpy(__A )
__A , __A , __A : Tuple = audio_values.shape
if padding_mask is None:
return list(__A )
__A : Union[str, Any] = to_numpy(__A )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__A : List[str] = seq_len - padding_mask.shape[-1]
__A : Tuple = 1 - self.feature_extractor.padding_value
__A : Optional[int] = np.pad(__A , ((0, 0), (0, difference)) , """constant""" , constant_values=__A )
__A : int = audio_values.tolist()
for i in range(__A ):
__A : str = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__A : List[Any] = sliced_audio.reshape(__A , -1 )
return audio_values
| 17 | 0 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_A : List[Any] = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_A : Dict = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""
_A : Dict = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=[\"About 95 species are currently accepted .\"]
>>> predictions=[\"About 95 you now get in .\"]
>>> references=[[\"About 95 species are currently known .\"]]
>>> wiki_split = datasets.load_metric(\"wiki_split\")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def __snake_case ( lowerCAmelCase_ ) -> List[str]:
def remove_articles(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = re.compile(r'''\b(a|an|the)\b''' , re.UNICODE )
return re.sub(lowerCAmelCase_ , ''' ''' , lowerCAmelCase_ )
def white_space_fix(lowerCAmelCase_ ):
return " ".join(text.split() )
def remove_punc(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowerCAmelCase_ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase_ ) ) ) )
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
return int(normalize_answer(lowerCAmelCase_ ) == normalize_answer(lowerCAmelCase_ ) )
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
SCREAMING_SNAKE_CASE__ = [any(compute_exact(lowerCAmelCase_ , lowerCAmelCase_ ) for ref in refs ) for pred, refs in zip(lowerCAmelCase_ , lowerCAmelCase_ )]
return (sum(lowerCAmelCase_ ) / len(lowerCAmelCase_ )) * 1_0_0
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
SCREAMING_SNAKE_CASE__ = [rgram for rgrams in rgramslist for rgram in rgrams]
SCREAMING_SNAKE_CASE__ = Counter(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = Counter(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = Counter()
for sgram, scount in sgramcounter.items():
SCREAMING_SNAKE_CASE__ = scount * numref
SCREAMING_SNAKE_CASE__ = Counter(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = Counter()
for cgram, ccount in cgramcounter.items():
SCREAMING_SNAKE_CASE__ = ccount * numref
# KEEP
SCREAMING_SNAKE_CASE__ = sgramcounter_rep & cgramcounter_rep
SCREAMING_SNAKE_CASE__ = keepgramcounter_rep & rgramcounter
SCREAMING_SNAKE_CASE__ = sgramcounter_rep & rgramcounter
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 1
if len(lowerCAmelCase_ ) > 0:
SCREAMING_SNAKE_CASE__ = keeptmpscorea / len(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
SCREAMING_SNAKE_CASE__ = keeptmpscorea / sum(keepgramcounterall_rep.values() )
SCREAMING_SNAKE_CASE__ = 0
if keepscore_precision > 0 or keepscore_recall > 0:
SCREAMING_SNAKE_CASE__ = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
SCREAMING_SNAKE_CASE__ = sgramcounter_rep - cgramcounter_rep
SCREAMING_SNAKE_CASE__ = delgramcounter_rep - rgramcounter
SCREAMING_SNAKE_CASE__ = sgramcounter_rep - rgramcounter
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
SCREAMING_SNAKE_CASE__ = 1
if len(lowerCAmelCase_ ) > 0:
SCREAMING_SNAKE_CASE__ = deltmpscorea / len(lowerCAmelCase_ )
# ADDITION
SCREAMING_SNAKE_CASE__ = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = set(lowerCAmelCase_ ) & set(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 1
if len(lowerCAmelCase_ ) > 0:
SCREAMING_SNAKE_CASE__ = addtmpscore / len(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
SCREAMING_SNAKE_CASE__ = addtmpscore / len(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = 0
if addscore_precision > 0 or addscore_recall > 0:
SCREAMING_SNAKE_CASE__ = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = len(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = ssent.split(''' ''' )
SCREAMING_SNAKE_CASE__ = csent.split(''' ''' )
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
for rsent in rsents:
SCREAMING_SNAKE_CASE__ = rsent.split(''' ''' )
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
ragramslist.append(lowerCAmelCase_ )
for i in range(0 , len(lowerCAmelCase_ ) - 1 ):
if i < len(lowerCAmelCase_ ) - 1:
SCREAMING_SNAKE_CASE__ = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(lowerCAmelCase_ )
if i < len(lowerCAmelCase_ ) - 2:
SCREAMING_SNAKE_CASE__ = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(lowerCAmelCase_ )
if i < len(lowerCAmelCase_ ) - 3:
SCREAMING_SNAKE_CASE__ = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(lowerCAmelCase_ )
ragramslist.append(lowerCAmelCase_ )
ragramslist.append(lowerCAmelCase_ )
ragramslist.append(lowerCAmelCase_ )
for i in range(0 , len(lowerCAmelCase_ ) - 1 ):
if i < len(lowerCAmelCase_ ) - 1:
SCREAMING_SNAKE_CASE__ = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(lowerCAmelCase_ )
if i < len(lowerCAmelCase_ ) - 2:
SCREAMING_SNAKE_CASE__ = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(lowerCAmelCase_ )
if i < len(lowerCAmelCase_ ) - 3:
SCREAMING_SNAKE_CASE__ = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(lowerCAmelCase_ )
for i in range(0 , len(lowerCAmelCase_ ) - 1 ):
if i < len(lowerCAmelCase_ ) - 1:
SCREAMING_SNAKE_CASE__ = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(lowerCAmelCase_ )
if i < len(lowerCAmelCase_ ) - 2:
SCREAMING_SNAKE_CASE__ = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(lowerCAmelCase_ )
if i < len(lowerCAmelCase_ ) - 3:
SCREAMING_SNAKE_CASE__ = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(lowerCAmelCase_ )
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
SCREAMING_SNAKE_CASE__ = sum([delascore, delascore, delascore, delascore] ) / 4
SCREAMING_SNAKE_CASE__ = sum([addascore, addascore, addascore, addascore] ) / 4
SCREAMING_SNAKE_CASE__ = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ = True , lowerCAmelCase_ = "13a" , lowerCAmelCase_ = True ) -> Optional[Any]:
# Normalization is requried for the ASSET dataset (one of the primary
# datasets in sentence simplification) to allow using space
# to split the sentence. Even though Wiki-Auto and TURK datasets,
# do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
SCREAMING_SNAKE_CASE__ = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
SCREAMING_SNAKE_CASE__ = sacrebleu.metrics.bleu._get_tokenizer(lowerCAmelCase_ )()(lowerCAmelCase_ )
else:
SCREAMING_SNAKE_CASE__ = sacrebleu.TOKENIZERS[tokenizer]()(lowerCAmelCase_ )
elif tokenizer == "moses":
SCREAMING_SNAKE_CASE__ = sacremoses.MosesTokenizer().tokenize(lowerCAmelCase_ , return_str=lowerCAmelCase_ , escape=lowerCAmelCase_ )
elif tokenizer == "penn":
SCREAMING_SNAKE_CASE__ = sacremoses.MosesTokenizer().penn_tokenize(lowerCAmelCase_ , return_str=lowerCAmelCase_ )
else:
SCREAMING_SNAKE_CASE__ = sentence
if not return_str:
SCREAMING_SNAKE_CASE__ = normalized_sent.split()
return normalized_sent
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
if not (len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
SCREAMING_SNAKE_CASE__ = 0
for src, pred, refs in zip(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
sari_score += SARIsent(normalize(lowerCAmelCase_ ) , normalize(lowerCAmelCase_ ) , [normalize(lowerCAmelCase_ ) for sent in refs] )
SCREAMING_SNAKE_CASE__ = sari_score / len(lowerCAmelCase_ )
return 1_0_0 * sari_score
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="exp" , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> Any:
SCREAMING_SNAKE_CASE__ = len(references[0] )
if any(len(lowerCAmelCase_ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
SCREAMING_SNAKE_CASE__ = [[refs[i] for refs in references] for i in range(lowerCAmelCase_ )]
SCREAMING_SNAKE_CASE__ = sacrebleu.corpus_bleu(
lowerCAmelCase_ , lowerCAmelCase_ , smooth_method=lowerCAmelCase_ , smooth_value=lowerCAmelCase_ , force=lowerCAmelCase_ , lowercase=lowerCAmelCase_ , use_effective_order=lowerCAmelCase_ , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
'''simple docstring'''
def lowercase_ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def lowercase_ ( self , A_ , A_ , A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {}
result.update({'''sari''': compute_sari(sources=A_ , predictions=A_ , references=A_ )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=A_ , references=A_ )} )
result.update({'''exact''': compute_em(predictions=A_ , references=A_ )} )
return result
| 100 |
def __SCREAMING_SNAKE_CASE ( a__ : int ) -> int:
if not isinstance(a__ ,a__ ):
raise TypeError("""Input value must be an 'int' type""" )
__A : Union[str, Any] = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.