| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 to 53.2k chars) | int64 (0 to 721) | string (91 to 41.9k chars) | int64 (0 to 699) | int64 (0 or 1) |
'''k-nearest neighbours classification of the iris dataset.'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b) -> float:
    '''Euclidean distance between two points.'''
    return np.linalg.norm(np.array(a) - np.array(b))
def classifier(train_data, train_target, classes, point, k=5) -> str:
    '''Classify `point` by majority vote among its k nearest neighbours.'''
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
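# Added evaluation sketch (not in the original snippet): score the classifier
# on the held-out split produced by train_test_split above.
def knn_accuracy(k: int = 5) -> float:
    predictions = [classifier(X_train, y_train, classes, point, k) for point in X_test]
    return sum(pred == classes[true] for pred, true in zip(predictions, y_test)) / len(y_test)
# print(knn_accuracy())  # typically high on the easily separable iris data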
| 72 |
def solution(n: int = 100) -> int:
    '''Project Euler 6: difference between the square of the sum and the sum of the squares of the first n natural numbers.'''
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
    print(f'{solution() = }')
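# Added cross-check (not in the original): brute force agrees with the closed form.
def solution_brute_force(n: int = 100) -> int:
    numbers = range(1, n + 1)
    return sum(numbers) ** 2 - sum(i * i for i in numbers)
assert solution(10) == solution_brute_force(10) == 2640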
| 10 | 0 |
'''Tests for Dataset.from_list.'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetFromListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]
    def _create_example_dict_dataset(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)
    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])
    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)
    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns
    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))
    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
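# Standalone usage sketch (added): Dataset.from_list infers the schema from the
# first record, which is exactly what the tests above pin down.
ds = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
print(ds.column_names)  # ['col_1', 'col_2']
print(ds[0])            # {'col_1': 3, 'col_2': 'a'}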
| 631 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # The tester pins its configuration below regardless of the constructor defaults.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache"):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs), num_out)
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
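# Added usage sketch (not from the test file): run the public checkpoint used by
# the slow tests above through a plain forward pass (downloads weights on first use).
from transformers import AutoTokenizer
sketch_tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
sketch_model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
sketch_inputs = sketch_tokenizer("Hello world", return_tensors="tf")
print(sketch_model(sketch_inputs).last_hidden_state.shape)  # (1, sequence_length, 768)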
| 631 | 1 |
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        """A graph over num_of_nodes nodes, stored as a weighted edge list."""
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}
    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Add a weighted edge between u_node and v_node."""
        self.m_edges.append([u_node, v_node, weight])
    def find_component(self, u_node: int) -> int:
        """Return the root of the component containing u_node."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])
    def set_component(self, u_node: int) -> None:
        """Recompute every node's component root after a merge."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)
    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merge the components of u_node and v_node, attaching the smaller to the larger."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)
    def boruvka(self) -> None:
        """Compute a minimum spanning tree with Boruvka's algorithm, printing each added edge."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f'Added edge [{u} - {v}]\nAdded weight: {w}\n')
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f'The total weight of the minimal spanning tree is: {mst_weight}')
if __name__ == "__main__":
    import doctest
    doctest.testmod()
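# Added usage sketch: a 4-node graph whose MST has weight 1 + 2 + 3 = 6.
g = Graph(4)
g.add_edge(0, 1, 1)
g.add_edge(1, 2, 2)
g.add_edge(2, 3, 3)
g.add_edge(0, 3, 4)
g.boruvka()  # prints the added edges and a total weight of 6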
| 43 |
'''Fetch top stories from Hacker News via the Firebase API.'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    '''Fetch a single story by id.'''
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    '''Fetch the ids of the current top stories and resolve each one.'''
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    '''Format the top stories as a markdown bullet list.'''
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 296 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : List[str] = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
__A =DPTConfig()
if "large" in checkpoint_url:
__A =10_24
__A =40_96
__A =24
__A =16
__A =[5, 11, 17, 23]
__A =[2_56, 5_12, 10_24, 10_24]
__A =(1, 3_84, 3_84)
if "ade" in checkpoint_url:
__A =True
__A =1_50
__A ='''huggingface/label-files'''
__A ='''ade20k-id2label.json'''
__A =json.load(open(cached_download(hf_hub_url(__A , __A , repo_type='''dataset''' ) ) , '''r''' ) )
__A ={int(__A ): v for k, v in idalabel.items()}
__A =idalabel
__A ={v: k for k, v in idalabel.items()}
__A =[1, 1_50, 4_80, 4_80]
return config, expected_shape
def remove_ignore_keys_(state_dict):
__A =['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
for k in ignore_keys:
state_dict.pop(__A , __A )
def rename_key(name):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__A =name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
__A =name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
__A =name.replace('''patch_embed''' , '''patch_embeddings''' )
if "pos_embed" in name:
__A =name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
__A =name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
__A =name.replace('''proj''' , '''projection''' )
if "blocks" in name:
__A =name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
__A =name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__A =name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name:
__A =name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
__A =name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
__A =name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
__A =name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
__A =name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
__A =name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
__A =name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
__A =name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
__A =int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__A =name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
__A =name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
__A =name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
__A =name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
__A =name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
__A =name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__A =name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
__A =name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
__A =name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
__A =name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__A =name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
__A =name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
__A =name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
__A =name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
__A =name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
__A =name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
__A =name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
__A =name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
__A =name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
__A =name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
__A =name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
__A =name.replace('''auxlayer''' , '''auxiliary_head.head''' )
return name
def read_in_q_k_v(state_dict, config):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__A =state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
__A =state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__A =in_proj_weight[: config.hidden_size, :]
__A =in_proj_bias[: config.hidden_size]
__A =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__A =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__A =in_proj_weight[
-config.hidden_size :, :
]
__A =in_proj_bias[-config.hidden_size :]
def prepare_img():
__A ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
__A =Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
__A , __A =get_dpt_config(__A )
# load original state_dict from URL
__A =torch.hub.load_state_dict_from_url(__A , map_location='''cpu''' )
# remove certain keys
remove_ignore_keys_(__A )
# rename keys
for key in state_dict.copy().keys():
__A =state_dict.pop(__A )
__A =val
# read in qkv matrices
read_in_q_k_v(__A , __A )
# load HuggingFace model
__A =DPTForSemanticSegmentation(__A ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(__A )
model.load_state_dict(__A )
model.eval()
# Check outputs on an image
__A =4_80 if '''ade''' in checkpoint_url else 3_84
__A =DPTImageProcessor(size=__A )
__A =prepare_img()
__A =image_processor(__A , return_tensors='''pt''' )
# forward pass
__A =model(**__A ).logits if '''ade''' in checkpoint_url else model(**__A ).predicted_depth
# Assert logits
__A =torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
if "ade" in checkpoint_url:
__A =torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
assert outputs.shape == torch.Size(__A )
assert (
torch.allclose(outputs[0, 0, :3, :3] , __A , atol=1e-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , __A )
)
Path(__A ).mkdir(exist_ok=__A )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__A )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__A )
if push_to_hub:
print('''Pushing model to hub...''' )
model.push_to_hub(
repo_path_or_name=Path(__A , __A ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=__A , )
image_processor.push_to_hub(
repo_path_or_name=Path(__A , __A ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=__A , )
if __name__ == "__main__":
_lowerCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
_lowerCamelCase : Any = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
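# Example invocation (added; the script filename and output folder are placeholders,
# the checkpoint URL is this script's own default):
# python convert_dpt_to_pytorch.py \
#     --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#     --pytorch_dump_folder_path ./dpt-large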
| 516 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    '''Normality = molarity * n-factor, where molarity = moles / volume.'''
    return round(float(moles / volume) * nfactor)
def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    '''Ideal gas law solved for pressure: P = nRT / V (R = 0.0821 L*atm/(mol*K)).'''
    return round(float((moles * 0.0821 * temperature) / (volume)))
def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    '''Ideal gas law solved for volume: V = nRT / P.'''
    return round(float((moles * 0.0821 * temperature) / (pressure)))
def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    '''Ideal gas law solved for temperature: T = PV / (nR).'''
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
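# Added worked example: PV = nRT with R = 0.0821 L*atm/(mol*K); 2 mol at 300 K in
# a 10 L vessel exert roughly 2 * 0.0821 * 300 / 10 = 4.926 atm, rounded to 5.
print(moles_to_pressure(volume=10, moles=2, temperature=300))  # 5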
| 516 | 1 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
_snake_case : str = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
if "xprophetnet" in prophetnet_checkpoint_path:
__snake_case : Any = XLMProphetNetForConditionalGenerationOld.from_pretrained(__lowerCamelCase )
__snake_case , __snake_case : Optional[Any] = XLMProphetNetForConditionalGeneration.from_pretrained(
__lowerCamelCase , output_loading_info=__lowerCamelCase )
else:
__snake_case : Optional[int] = ProphetNetForConditionalGenerationOld.from_pretrained(__lowerCamelCase )
__snake_case , __snake_case : Dict = ProphetNetForConditionalGeneration.from_pretrained(
__lowerCamelCase , output_loading_info=__lowerCamelCase )
__snake_case : Union[str, Any] = ["key_proj", "value_proj", "query_proj"]
__snake_case : Tuple = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
__snake_case : Optional[Any] = key.split("." )
if attributes[0] == "lm_head":
__snake_case : Dict = prophet
__snake_case : Tuple = prophet_old
else:
__snake_case : str = prophet.prophetnet
__snake_case : Any = prophet_old.model
__snake_case : Any = False
for attribute in attributes:
if attribute in mapping:
__snake_case : Optional[int] = mapping[attribute]
if not hasattr(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) > 0:
__snake_case : List[str] = attribute
elif hasattr(__lowerCamelCase , __lowerCamelCase ):
__snake_case : List[str] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
__snake_case : Dict = old_model.weight
logger.info(F'{attribute} is initialized.' )
__snake_case : int = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
__snake_case : int = old_model.bias
logger.info(F'{attribute} is initialized' )
__snake_case : Dict = True
break
elif attribute in special_keys and hasattr(__lowerCamelCase , "in_proj_weight" ):
__snake_case : Any = old_model.in_proj_weight.shape[0] // 3
__snake_case : Dict = getattr(__lowerCamelCase , __lowerCamelCase )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
__snake_case : Any = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
__snake_case : Tuple = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
__snake_case : str = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
__snake_case : List[Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
__snake_case : List[Any] = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
__snake_case : Optional[Any] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
__snake_case : int = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings."
__snake_case : Tuple = nn.Parameter(old_model.embed_positions.weight[:5_1_2, :] )
__snake_case : List[str] = True
break
if attribute.isdigit():
__snake_case : List[str] = model[int(__lowerCamelCase )]
__snake_case : int = old_model[int(__lowerCamelCase )]
else:
__snake_case : Optional[int] = getattr(__lowerCamelCase , __lowerCamelCase )
if old_attribute == "":
__snake_case : Optional[Any] = old_model
else:
if not hasattr(__lowerCamelCase , __lowerCamelCase ):
raise ValueError(F'{old_model} does not have {old_attribute}' )
__snake_case : str = getattr(__lowerCamelCase , __lowerCamelCase )
if not is_key_init:
raise ValueError(F'{key} was not correctly initialized!' )
print(F'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_snake_case : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_snake_case : int = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
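# Example invocation (added; the script filename and local paths are placeholders):
# python convert_prophetnet_checkpoint.py \
#     --prophetnet_checkpoint_path ./prophetnet-large-uncased_old \
#     --pytorch_dump_folder_path ./prophetnet-large-uncased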
| 81 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''spiece.model'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''bert_for_seq_generation''': (
'''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
),
}
}
SCREAMING_SNAKE_CASE__ = {'''bert_for_seq_generation''': 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    '''Sentencepiece tokenizer for BERT-for-sequence-generation checkpoints.'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)
    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
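# Added usage sketch (checkpoint name taken from the pretrained map above;
# downloads the sentencepiece model on first use):
# tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
# print(tok.tokenize("Hello world"))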
| 47 | 0 |
'''Tests for the summarization preprocessing utilities.'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10
    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with zeros when it is shorter than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)
    def test_fit_to_block_sequence_fit_exactly(self):
        """Leave the sequence unchanged when it matches the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)
    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence when it is longer than the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)
    def test_process_story_no_highlights(self):
        """A story with no highlights yields an empty summary list."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])
    def test_process_empty_story(self):
        """An empty story yields empty document and summary lists."""
        raw_story = ""
        document_lines, summary_lines = process_story(raw_story)
        self.assertEqual(document_lines, [])
        self.assertEqual(summary_lines, [])
    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        document_lines, summary_lines = process_story(raw_story)
        expected_document_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_document_lines, document_lines)
        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)
    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())
    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())
    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())
    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
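# Added reference sketch (an assumption, shown only to make the tested contract
# concrete; it mirrors what utils_summarization.truncate_or_pad must do):
def _truncate_or_pad_reference(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))
assert _truncate_or_pad_reference([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]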
| 713 |
'''Shrink an ONNX model by removing duplicate (shared) initializers.'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """Compare two tensor protos while ignoring their names."""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """Deduplicate initializers and save an 'optimized_' copy next to the input file."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    new_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, new_model_file_name)
    onnx.save(model, new_model_path)
    return new_model_path
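# Added usage sketch (the path is a placeholder): the entry point writes an
# "optimized_" copy of the model next to the original and returns its path.
# optimized_path = remove_dup_initializers("exported/model.onnx")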
| 47 | 0 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path
    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )
    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")
    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
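# Added conceptual sketch (an assumption, simplified from what MagnitudeBinarizer
# does): magnitude pruning keeps only the weights whose absolute value clears tau.
def _magnitude_mask_sketch(tensor, tau):
    return (tensor.abs() > tau).to(tensor.dtype)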
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
snake_case = parser.parse_args()
main(args)
| 67 |
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()
    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
    main()
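# Added shape sketch: each record read above provides at least these fields,
# exercised here on an in-memory example.
example = {"question": "who wrote hamlet", "positive_ctxs": [{"title": "Hamlet"}, {"title": "William Shakespeare"}]}
assert [c["title"] for c in example["positive_ctxs"]] == ["Hamlet", "William Shakespeare"]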
| 492 | 0 |
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = """3"""  # suppress TensorFlow C++ logging
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 700 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_sew"""] = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
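# Usage sketch (assumes a transformers install that ships the SEW models): because of
# _LazyModule, `from transformers import SEWConfig` only imports the configuration file,
# while the first access to a modeling symbol (e.g. `from transformers import SEWModel`)
# is what actually imports modeling_sew and, transitively, torch.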
| 486 | 0 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    config_parameters_to_change = {
"image_size": "sample_size",
"num_res_blocks": "layers_per_block",
"block_channels": "block_out_channels",
"down_blocks": "down_block_types",
"up_blocks": "up_block_types",
"downscale_freq_shift": "freq_shift",
"resnet_num_groups": "norm_num_groups",
"resnet_act_fn": "act_fn",
"resnet_eps": "norm_eps",
"num_head_channels": "attention_head_dim",
}
    key_parameters_to_change = {
"time_steps": "time_proj",
"mid": "mid_block",
"downsample_blocks": "down_blocks",
"upsample_blocks": "up_blocks",
}
__UpperCAmelCase ="" if has_file(args.repo_path, "config.json") else "unet"
with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
__UpperCAmelCase =reader.read()
__UpperCAmelCase =json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]
if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))
        new_state_dict = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
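# Quick illustrative check of the prefix-renaming rule above (hypothetical parameter key,
# not taken from a real checkpoint): "downsample_blocks" maps to "down_blocks" while the
# rest of the dotted path is preserved.
_old_key = "downsample_blocks.0.conv.weight"
_new_key = ".".join(["down_blocks"] + _old_key.split(".")[1:])
assert _new_key == "down_blocks.0.conv.weight"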
| 546 | '''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob('''./metrics/*/''')]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' )
    def test_load_metric(self, metric_name):
        """simple docstring"""
        __lowerCamelCase = '''[...]'''  # elided in the source
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('''metrics''', metric_name)).module_path)
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
@slow
    def test_load_real_metric(self, metric_name):
        """simple docstring"""
        __lowerCamelCase = '''[...]'''  # elided in the source
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('''metrics''', metric_name)).module_path)
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
@contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        """simple docstring"""
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield
@contextmanager
    def use_local_metrics(self):
        """simple docstring"""
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join('''metrics''', metric_name), *args, **kwargs)

        with patch('''datasets.load_metric''') as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield
@classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        """simple docstring"""
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher('''bleurt''' )
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string('''sv''', '''''', '''''')  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict['''input_ids''']) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch('''bleurt.score._create_predictor''') as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher('''bertscore''' )
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch('''bert_score.scorer.get_model'''), patch(
        '''bert_score.scorer.bert_cos_score_idf''') as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher('''comet''' )
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint, which would otherwise download a full model
    with patch('''comet.download_model''') as mock_download_model:
        mock_download_model.return_value = None
        with patch('''comet.load_from_checkpoint''') as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join('''metrics''', '''seqeval'''))
    wrong_scheme = '''ERROR'''
    error_message = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
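# Hypothetical sketch of how a new patcher would plug into the registry above (the metric
# name and patched path are invented for illustration, so the code is left commented out):
#
#     @LocalMetricTest.register_intensive_calls_patcher("my_metric")
#     def patch_my_metric(module_name):
#         with patch("my_metric.expensive_forward") as mock_forward:
#             mock_forward.return_value = 0.5
#             yield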
| 546 | 1 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
'''simple docstring'''
    def _no_encoding_on_file_open(self, filepath: str):
        '''simple docstring'''
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match
    def _no_print_statements(self, filepath: str):
        '''simple docstring'''
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open(self):
        '''simple docstring'''
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""")
    def test_no_print_statements(self):
        '''simple docstring'''
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""")
| 714 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_convbert"""] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_convbert"""] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 150 | 0 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string',id='token' ),id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string',id='token' ),id='sequence' ),id='references' ),
} ),)
    def _compute(self, predictions, references, min_len=1, max_len=4):
        '''simple docstring'''
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len)
        }
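# Tiny worked example of the min(precision, recall) definition quoted in _DESCRIPTION
# (illustrative only): with hyp = "the cat sat" and ref = "the cat", using 1- and 2-grams,
# 3 of the 5 hypothesis n-grams match and all 3 reference n-grams match, so
# precision = 3/5, recall = 3/3 and GLEU = min(3/5, 1.0) = 0.6.
assert round(gleu_score.sentence_gleu([["the", "cat"]], ["the", "cat", "sat"], min_len=1, max_len=2), 2) == 0.6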
| 1 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCAmelCase : Tuple = """scheduler_config.json"""
class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs, ):
        '''simple docstring'''
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, **kwargs, )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state
    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        '''simple docstring'''
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)
@property
    def compatibles(self):
        '''simple docstring'''
        return self._get_compatibles()
@classmethod
    def _get_compatibles(cls):
        '''simple docstring'''
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    """simple docstring"""
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32):
    """simple docstring"""
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray
@classmethod
    def create(cls, scheduler):
        '''simple docstring'''
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''')

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod, )
def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    """simple docstring"""
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    """simple docstring"""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    """simple docstring"""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
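# Standalone numeric sketch (illustrative values only) of the forward-noising identity the
# helpers above implement: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps.
_ac = jnp.cumprod(1.0 - jnp.linspace(1e-4, 2e-2, 10))  # toy alphas_cumprod
_x0 = jnp.ones((2, 4))                                 # toy clean samples
_eps = jnp.zeros((2, 4))                               # toy noise
_t = jnp.array([0, 9])                                 # per-sample timesteps
_sqrt_ab = broadcast_to_shape_from_left((_ac[_t] ** 0.5).flatten(), _x0.shape)
_sqrt_one_minus_ab = broadcast_to_shape_from_left(((1 - _ac[_t]) ** 0.5).flatten(), _x0.shape)
_x_t = _sqrt_ab * _x0 + _sqrt_one_minus_ab * _eps
assert _x_t.shape == _x0.shape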
| 563 | 0 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def _UpperCamelCase ( __A ) -> float:
'''simple docstring'''
return np.dot(__A , __A )
class SVC:
    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            error_msg = f'''Unknown kernel: {kernel}'''
            raise ValueError(error_msg)
    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
        (n,) = np.shape(classes)
        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)
        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star
# calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j])
        self.offset = s / n
    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes)))
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
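def _demo_svc_usage() -> None:
    """Hedged usage sketch for the SVC class above (illustrative only; never called here).

    The dual formulation in fit() assumes class labels encoded as +1/-1.
    """
    svc = SVC(kernel="linear")
    svc.fit([np.array([0.0, 1.0]), np.array([0.0, -1.0])], np.array([1, -1]))
    print(svc.predict(np.array([0.0, 2.0])))  # expected: 1 for this toy data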
| 223 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from=None, standard_warn=True, stacklevel=2):
    '''simple docstring'''
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                F''' version {__version__} is >= {version_name}''')

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            warning = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''')

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
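# Hedged usage sketch for deprecate() (the kwarg name below is invented for illustration):
#
#     def run(**kwargs):
#         legacy = deprecate("legacy_arg", "999.0.0", "Use `new_arg` instead.", take_from=kwargs)
#
# This pops `legacy_arg` from kwargs with a FutureWarning and returns its value; leftover
# unexpected kwargs in `take_from` raise a TypeError after the loop.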
| 223 | 1 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
UpperCamelCase_ = "CompVis/stable-diffusion-v1-1"
UpperCamelCase_ = "CompVis/stable-diffusion-v1-2"
UpperCamelCase_ = "CompVis/stable-diffusion-v1-3"
UpperCamelCase_ = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    '''simple docstring'''

    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker=True, ):
        '''simple docstring'''
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker, )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self):
        '''simple docstring'''
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith('_')}

    def enable_attention_slicing(self, slice_size="auto"):
        '''simple docstring'''
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        '''simple docstring'''
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs, ):
        '''simple docstring'''
        return self.pipe1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs, ):
        '''simple docstring'''
        return self.pipe2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs, ):
        '''simple docstring'''
        return self.pipe3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs, ):
        '''simple docstring'''
        return self.pipe4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs, ):
        '''simple docstring'''
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
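# Hedged usage sketch (commented out because it downloads four full checkpoints; the
# custom_pipeline name is an assumption about how this community pipeline is published):
#
#     from diffusers import DiffusionPipeline
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#     )
#     images = pipe("a photo of an astronaut riding a horse").images  # one image per checkpoint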
| 28 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : int = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : int = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_xlnet"""] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_xlnet"""] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 87 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False
    def setUp(self) -> None:
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def ta_base_tokenizer(self) -> ByT5Tokenizer:
        return ByT5Tokenizer.from_pretrained('google/byt5-small')

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(R'^[ a-zA-Z]+$', t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks

        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self) -> None:
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'])
        batch_without_eos_added = tokenizer(['hi', 'I went to the gym', ''])
        self.assertListEqual(batch_with_eos_added['input_ids'], batch_without_eos_added['input_ids'])
    def test_multibytes_char(self) -> None:
        tokenizer = self.ta_base_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded['input_ids'], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, 'Unicode €.</s>')

        encoded = tokenizer('e è é ê ë')
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, 'e è é ê ë</s>')

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')), 'e è é ê ë</s>')
    def test_prepare_batch_integration(self) -> None:
        tokenizer = self.ta_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self) -> None:
        tokenizer = self.ta_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertNotIn('decoder_input_ids', batch)
        self.assertNotIn('decoder_attention_mask', batch)
    def test_max_length_integration(self) -> None:
        tokenizer = self.ta_base_tokenizer
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors=FRAMEWORK)
        self.assertEqual(32, targets['input_ids'].shape[1])
    def test_eos_in_input(self) -> None:
        tokenizer = self.ta_base_tokenizer
        src_text = ['A long paragraph for summarization. </s>']
        tgt_text = ['Summary of the text. </s>']
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch['input_ids'][0])
        self.assertEqual(expected_tgt_tokens, batch['labels'][0])
    def test_save_and_load_tokenizer(self) -> None:
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = ' He is very happy, UNwant\u00E9d,running'
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token')
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self) -> None:
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"""<extra_id_{i}>""" for i in range(125)]

                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]

                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir, )
                self.assertIn(
                    'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ['an_additional_special_token'],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])), )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, )

                self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens)
                self.assertEqual(
                    ['a_new_additional_special_token'],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])), )
    def test_decode_single_bytes(self) -> None:
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == '')
    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self) -> None:
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                tokens = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
    def test_tokenizers_common_ids_setters(self) -> None:
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                attributes_list = [
                    'bos_token',
                    'eos_token',
                    'unk_token',
                    'sep_token',
                    'pad_token',
                    'cls_token',
                    'mask_token',
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False)

                for attr in attributes_list:
                    setattr(tokenizer, attr + '_id', None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + '_id'), None)

                    setattr(tokenizer, attr + '_id', token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + '_id'), token_id_to_test_setters)

                setattr(tokenizer, 'additional_special_tokens_ids', [])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens'), [])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens_ids'), [])

                setattr(tokenizer, 'additional_special_tokens_ids', [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens'), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens_ids'), [token_id_to_test_setters])
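# Illustrative note on the id values asserted above (standalone check): ByT5 ids are raw
# UTF-8 bytes shifted by 3 to make room for the special tokens pad=0, eos=1 and unk=2,
# which is why 'h' (byte 104) appears as 107 in the tokenized output.
assert [b + 3 for b in "hi".encode("utf-8")] == [107, 108]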
| 421 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1
    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_dpmpp_2m")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 421 | 1 |
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr=None, size=None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
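# Illustrative usage of the Fenwick tree above (a sketch, not part of the
# original module); query(left, right) sums the half-open range [left, right):
#
#     >>> tree = FenwickTree([1, 2, 3, 4, 5])
#     >>> tree.query(1, 4)  # 2 + 3 + 4
#     9
#     >>> tree.add(2, 10)   # point update: arr[2] += 10
#     >>> tree.query(1, 4)
#     19
#     >>> tree.get_array()
#     [1, 2, 13, 4, 5]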
if __name__ == "__main__":
import doctest
doctest.testmod()
| 587 |
from math import factorial
def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
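# Worked example (illustrative): for n = 4 the result is the central binomial
# coefficient C(8, 4) = 8! / (4! * 4!) = 70, i.e. the number of monotonic
# lattice paths through a 4 x 4 grid:
#
#     >>> solution(4)
#     70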
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
        n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 587 | 1 |
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
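# Examples (illustrative):
#
#     >>> is_arithmetic_series([2, 4, 6])
#     True
#     >>> is_arithmetic_series([3, 6, 12, 24])
#     False
#     >>> arithmetic_mean([2, 4, 6])
#     4.0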
if __name__ == "__main__":
import doctest
doctest.testmod()
| 412 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # assume `image` is already a dict (or a batch of dicts) with image and question
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation)
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
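# Illustrative end-user sketch (the checkpoint name is an example, not part of
# this file):
#
#     from transformers import pipeline
#
#     vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#     vqa(image="http://images.cocodataset.org/val2017/000000039769.jpg",
#         question="How many cats are there?", top_k=2)
#     # -> [{"score": ..., "answer": "2"}, {"score": ..., "answer": ...}]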
| 412 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 4_096,
"""allenai/longformer-large-4096""": 4_096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4_096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4_096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
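# Quick illustrative checks of the two helpers above: the byte encoder covers
# all 256 byte values, and get_pairs yields adjacent symbol pairs for BPE
# merging:
#
#     >>> len(bytes_to_unicode())
#     256
#     >>> sorted(get_pairs(("l", "o", "w")))
#     [('l', 'o'), ('o', 'w')]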
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8"))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 163 |
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.")

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default.")
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()))
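# Illustrative sketch of how these backends are consumed via
# `Trainer.hyperparameter_search` (assumes optuna is installed and that the
# Trainer was constructed with a `model_init`):
#
#     best_run = trainer.hyperparameter_search(direction="minimize", backend="optuna", n_trials=10)
#     print(best_run.hyperparameters)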
| 498 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 713 |
'''simple docstring'''
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups
def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
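# Worked example (illustrative; key and message are arbitrary):
#
#     >>> cipher_map = create_cipher_map("Goodbye!!")
#     >>> encipher("Hello World!!", cipher_map)
#     'CYJJM VMQJB!!'
#     >>> decipher("CYJJM VMQJB!!", cipher_map)
#     'HELLO WORLD!!'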
def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 490 | 0 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested")
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested")
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 292 |
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
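# Example (illustrative): the search walks inward from both ends, so it works
# on unsorted data and returns the index of the key, or -1 when absent:
#
#     >>> search([1, 2, 4, 5, 3], 4)
#     2
#     >>> search([1, 2, 4, 5, 3], 6)
#     -1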
if __name__ == "__main__":
import doctest
doctest.testmod()
| 292 | 1 |
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
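# Worked example (illustrative): 25 is 0b11001, which has three set bits, so
# both implementations return 3:
#
#     >>> get_set_bits_count_using_brian_kernighans_algorithm(25)
#     3
#     >>> get_set_bits_count_using_modulo_operator(25)
#     3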
def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 709 |
"""simple docstring"""
def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
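# Example (illustrative): binary search finds the insertion point, then the
# elements are shifted right, giving O(n log n) comparisons but O(n^2) moves:
#
#     >>> binary_insertion_sort([5, 2, 4, 1])
#     [1, 2, 4, 5]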
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 468 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 515 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 515 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 227 |
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif conductivity < 0:
raise ValueError("Conductivity cannot be negative" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative" )
elif mobility < 0:
raise ValueError("mobility cannot be negative" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
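# Worked example (illustrative): with electron_conc = 1e20 and mobility = 0.01,
# the missing conductivity is mobility * electron_conc * ELECTRON_CHARGE
# = 0.01 * 1e20 * 1.6021e-19, approximately 0.16021 (up to float rounding):
#
#     >>> electric_conductivity(conductivity=0, electron_conc=1e20, mobility=0.01)
#     ('conductivity', 0.16021)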
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 227 | 1 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class Conversation:
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".')
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
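# Illustrative usage of the Conversation container above (a sketch, not part
# of the original file):
#
#     conversation = Conversation("Going to the movies tonight - any suggestions?")
#     # ... run it through a ConversationalPipeline, which calls mark_processed()
#     # and append_response(), then inspect the transcript:
#     print(conversation)
#     # Conversation id: <uuid>
#     # user >> Going to the movies tonight - any suggestions?
#     # bot >> ...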
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method")
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces)
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 217 |
def perfect_cube(n: int) -> bool:
    val = n ** (1 / 3)
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 344 | 0 |
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
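# Examples (illustrative): only the first character is mapped, and non-letters
# pass through unchanged:
#
#     >>> capitalize("hello world")
#     'Hello world'
#     >>> capitalize("123 hello world")
#     '123 hello world'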
if __name__ == "__main__":
from doctest import testmod
testmod()
| 557 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 557 | 1 |
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    """Convert an image to its negative by inverting every pixel's color."""
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
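# The nested loops above are pure Python and O(h * w); with NumPy the same
# negative can be computed in a single vectorized step. A minimal sketch,
# assuming `img` is the uint8 array returned by `imread`:
#
#     negative = 255 - img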
| 60 | from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor) -> None:
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(
        self, images=None, input_points=None, input_labels=None, input_boxes=None, return_tensors=None, **kwargs
    ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points, input_labels=input_labels, input_boxes=input_boxes
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )

        return encoding_image_processor

    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                # fall back to the first original size for every prompt
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)
            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor

    def _pad_points_and_labels(self, input_points, input_labels):
        """Pad every point array to the largest number of points, padding labels with the pad value."""
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels

    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False) -> np.ndarray:
        """Rescale coordinates from the original image size to the resized (longest-edge) size."""
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords

    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
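# A minimal usage sketch of this processor. The checkpoint name and the image
# file are assumptions for illustration, not part of the class above:
#
#     from PIL import Image
#     from transformers import SamProcessor
#
#     processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#     image = Image.open("dog.png")        # hypothetical local file
#     input_points = [[[450, 600]]]        # one (x, y) prompt point
#     inputs = processor(image, input_points=input_points, return_tensors="pt")
#     # `inputs` now holds pixel_values plus `input_points` rescaled to the
#     # processor's longest-edge target size.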
| 635 | 0 |
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)

                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add model", use_temp_dir=True)
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add image processor", use_temp_dir=True
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 704 | """simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
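# Example invocation of the script above (all paths are hypothetical):
#
#     python convert_ldm_uncond.py \
#         --checkpoint_path ldm_celeba.ckpt \
#         --config_path ldm_celeba.yaml \
#         --output_path ./ldm-pipeline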
| 67 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on


class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source language setting: prefix=[src_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting: prefix=[tgt_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
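# A minimal usage sketch of this tokenizer for translation. The checkpoint name
# matches PRETRAINED_VOCAB_FILES_MAP above; the sentence is illustrative:
#
#     from transformers import MBart50Tokenizer
#
#     tokenizer = MBart50Tokenizer.from_pretrained(
#         "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#     # input_ids begin with the en_XX language-code id and end with </s>,
#     # matching the prefix_tokens/suffix_tokens set by set_src_lang_special_tokens.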
| 304 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [line.strip() for line in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        # All ESM vocab tokens are treated as special (no-split) tokens.
        return super()._add_tokens(new_tokens, special_tokens=True)
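# A minimal usage sketch (the checkpoint name comes from the map above; the
# sequence is illustrative):
#
#     from transformers import EsmTokenizer
#
#     tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     encoded = tokenizer("MKTAYIAKQR")
#     # The no-split-token trie splits the protein sequence per residue, so
#     # input_ids is <cls> + one id per amino acid + <eos>.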
| 304 | 1 |
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
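# Worked example of the helper above, with illustrative values: for a 480x640
# input and a 384x384 target with keep_aspect_ratio=True and multiple=32, the
# scale closest to 1 wins (384/480 = 0.8 beats 384/640 = 0.6), so the output
# is (384, 512) -- both multiples of 32.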
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean=None,
        image_std=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 223 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 223 | 1 |
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Generate and return the next pseudorandom number in the sequence."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
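# The constants used above (a=1664525, c=1013904223, m=2<<31) are the widely
# used "Numerical Recipes" LCG parameters. With an explicit seed the stream is
# fully deterministic; a quick sketch:
#
#     gen_a = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
#     gen_b = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
#     assert gen_a.next_number() == gen_b.next_number()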
| 47 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
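# A minimal usage sketch of the ONNX config (the task name is an assumption):
#
#     config = RobertaConfig()
#     onnx_config = RobertaOnnxConfig(config, task="sequence-classification")
#     print(onnx_config.inputs)
#     # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#     #              ('attention_mask', {0: 'batch', 1: 'sequence'})])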
| 47 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
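# The attribute_map above lets the config answer the generic names used across
# the library; a quick sketch of the equivalence:
#
#     config = CTRLConfig()
#     assert config.hidden_size == config.n_embd == 1280
#     assert config.num_hidden_layers == config.n_layer == 48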
| 502 | 0 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


def html_progress_bar(value, total, prefix, label, width=300):
    "HTML code for a progress bar `value`/`total` with `prefix` and `label`."
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    """A progress-bar variant that also renders a metrics table and an optional child bar."""

    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback ):
    def __init__(self ):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False
    def on_train_begin(self , args , state , control , **kwargs ):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss" )
        self.training_tracker = NotebookTrainingTracker(state.max_steps , column_names )
    def on_step_end(self , args , state , control , **kwargs ):
        epoch = int(state.epoch ) if int(state.epoch ) == state.epoch else f"""{state.epoch:.2f}"""
        self.training_tracker.update(
            state.global_step + 1 , comment=f"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , )
        self._force_next_update = False
    def on_prediction_step(self , args , state , control , eval_dataloader=None , **kwargs ):
        if not has_length(eval_dataloader ):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader ) )
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader ) )
            self.prediction_bar.update(1 )
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1 )
    def on_predict(self , args , state , control , **kwargs ):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log(self , args , state , control , logs=None , **kwargs ):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values )
    def on_evaluate(self , args , state , control , metrics=None , **kwargs ):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history ):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch )
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss" ):
                    metric_key_prefix = re.sub(r"\_loss$" , "" , k )
            _ = metrics.pop("total_flos" , None )
            _ = metrics.pop("epoch" , None )
            _ = metrics.pop(f"""{metric_key_prefix}_runtime""" , None )
            _ = metrics.pop(f"""{metric_key_prefix}_samples_per_second""" , None )
            _ = metrics.pop(f"""{metric_key_prefix}_steps_per_second""" , None )
            _ = metrics.pop(f"""{metric_key_prefix}_jit_compilation_time""" , None )
            for k, v in metrics.items():
                if k == f"""{metric_key_prefix}_loss""":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_" )
                    name = " ".join([part.capitalize() for part in splits[1:]] )
                    values[name] = v
            self.training_tracker.write_line(values )
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end(self , args , state , control , **kwargs ):
        self.training_tracker.update(
            state.global_step , comment=f"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" , force_update=True )
        self.training_tracker = None
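# Note: `Trainer` selects NotebookProgressCallback automatically when it detects a
# Jupyter environment, so this class rarely needs to be instantiated by hand.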
| 78 |
import itertools
import math
from collections.abc import Iterator
def is_prime(number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1, since 6k, 6k + 2 and
    # 6k + 4 are even and 6k + 3 is divisible by 3.
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution(nth: int = 10_001 ) -> int:
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
if __name__ == "__main__":
    print(f'{solution() = }')
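    print(f'{solution(10) = }')  # quick sanity check: the 10th prime is 29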
| 2 | 0 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImgaImgSuperResolutionPipelineFastTests(PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFImgaImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components(self ):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self , device , seed=0 ):
        if str(device ).startswith('''mps'''):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed )).to(device )
        original_image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed )).to(device )
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
    def test_save_load_optional_components(self ):
        self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''')
    def test_save_load_float16(self ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self ):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
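    # The shared dummy components and the `_test_*` assertions used above come from
    # PipelineTesterMixin and IFPipelineTesterMixin.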
| 78 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , embedding_size=16 , hidden_size=36 , num_hidden_layers=6 , num_hidden_groups=6 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
        return AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
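    # Each create_and_check_* helper below instantiates one ALBERT head, runs a
    # forward pass on the dummy inputs and asserts the expected output shapes.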
    def create_and_check_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_pretraining(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , sentence_order_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
    def create_and_check_for_masked_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp(self ):
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AlbertConfig , hidden_size=37 )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_masked_lm(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_model_various_embeddings(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_no_head_absolute_embedding(self ):
        model = AlbertModel.from_pretrained('''albert-base-v2''')
        input_ids = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 7_68) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4))
| 78 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self , components : Collection[float] | None = None ) -> None:
        if components is None:
            components = []
        self.__components = list(components )
    def __len__(self ) -> int:
        return len(self.__components )
    def __str__(self ) -> str:
        return "(" + ",".join(map(str , self.__components ) ) + ")"
    def __add__(self , other : Vector ) -> Vector:
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] + other.component(i ) for i in range(size )]
            return Vector(result )
else:
raise Exception("""must have the same size""" )
    def __sub__(self , other : Vector ) -> Vector:
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] - other.component(i ) for i in range(size )]
            return Vector(result )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self : List[Any] , __magic_name__ : float ) -> Vector:
"""simple docstring"""
...
@overload
def __mul__( self : Any , __magic_name__ : Vector ) -> float:
"""simple docstring"""
...
    def __mul__(self , other : float | Vector ) -> float | Vector:
        if isinstance(other , (float, int) ):
            ans = [c * other for c in self.__components]
            return Vector(ans )
        elif isinstance(other , Vector ) and len(self ) == len(other ):
            size = len(self )
            prods = [self.__components[i] * other.component(i ) for i in range(size )]
            return sum(prods )
else: # error case
raise Exception("""invalid operand!""" )
    def copy(self ) -> Vector:
        return Vector(self.__components )
    def component(self , i : int ) -> float:
        if isinstance(i , int ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
        else:
            raise Exception("""index out of range""" )
    def change_component(self , pos : int , value : float ) -> None:
        assert -len(self.__components ) <= pos < len(self.__components )
        self.__components[pos] = value
    def euclidean_length(self ) -> float:
        if len(self.__components ) == 0:
            raise Exception("""Vector is empty""" )
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares ) )
    def angle(self , other : Vector , deg : bool = False ) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den ) )
        else:
            return math.acos(num / den )
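# A minimal usage sketch (assuming only the class above):
#   Vector([1, 2]) + Vector([3, 4]) -> (4,6); Vector([3, 4]).euclidean_length() -> 5.0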
def zero_vector(dimension: int ) -> Vector:
    assert isinstance(dimension , int )
    return Vector([0] * dimension )
def unit_basis_vector(dimension: int , pos: int ) -> Vector:
    assert isinstance(dimension , int ) and isinstance(pos , int )
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )
def axpy(scalar: float , x: Vector , y: Vector ) -> Vector:
    assert (
        isinstance(x , Vector )
        and isinstance(y , Vector )
        and (isinstance(scalar , (int, float) ))
    )
    return x * scalar + y
def random_vector(n: int , a: int , b: int ) -> Vector:
    random.seed(None )
    ans = [random.randint(a , b ) for _ in range(n )]
    return Vector(ans )
class Matrix:
    def __init__(self , matrix : list[list[float]] , w : int , h : int ) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h
    def __str__(self ) -> str:
        ans = ""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
    def __add__(self , other : Matrix ) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] + other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception("""matrices must have the same dimension!""" )
    def __sub__(self , other : Matrix ) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] - other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self : int , __magic_name__ : float ) -> Matrix:
"""simple docstring"""
...
@overload
def __mul__( self : List[Any] , __magic_name__ : Vector ) -> Vector:
"""simple docstring"""
...
    def __mul__(self , other : float | Vector ) -> Vector | Matrix:
        if isinstance(other , Vector ):  # matrix-vector
            if len(other ) == self.__width:
                ans = zero_vector(self.__height )
                for i in range(self.__height ):
                    prods = [
                        self.__matrix[i][j] * other.component(j )
                        for j in range(self.__width )
                    ]
                    ans.change_component(i , sum(prods ) )
                return ans
            else:
                raise Exception(
                    """vector must have the same size as the """
                    """number of columns of the matrix!""" )
        elif isinstance(other , (int, float) ):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width )]
                for i in range(self.__height )
            ]
            return Matrix(matrix , self.__width , self.__height )
        return None
    def height(self ) -> int:
        return self.__height
    def width(self ) -> int:
        return self.__width
    def component(self , x : int , y : int ) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("""component: indices out of bounds""" )
    def change_component(self , x : int , y : int , value : float ) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("""change_component: indices out of bounds""" )
    def minor(self , x : int , y : int ) -> float:
        if self.__height != self.__width:
            raise Exception("""Matrix is not square""" )
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor ) ):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor , self.__width - 1 , self.__height - 1 ).determinant()
    def cofactor(self , x : int , y : int ) -> float:
        if self.__height != self.__width:
            raise Exception("""Matrix is not square""" )
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x , y )
        else:
            raise Exception("""Indices out of bounds""" )
    def determinant(self ) -> float:
        if self.__height != self.__width:
            raise Exception("""Matrix is not square""" )
        if self.__height < 1:
            raise Exception("""Matrix has no element""" )
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            # Laplace (cofactor) expansion along the first row
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0 , y ) for y in range(self.__width )
            ]
            return sum(cofactor_prods )
def square_zero_matrix(n: int ) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n )]
    return Matrix(ans , n , n )
def random_matrix(width: int , height: int , a: int , b: int ) -> Matrix:
    random.seed(None )
    matrix: list[list[float]] = [
        [random.randint(a , b ) for _ in range(width )] for _ in range(height )
    ]
    return Matrix(matrix , width , height )
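# A minimal usage sketch (assuming only the classes above):
#   Matrix([[1, 2], [3, 4]], 2, 2).determinant() -> -2; square_zero_matrix(2).determinant() -> 0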
| 26 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" ,"""False""" ) ) is not True ,reason="""Skipping test because should only be run when releasing minor transformers version""" ,)
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
] )
class __A ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self ):
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=True , )
        assert hasattr(self , '''env''')
    def create_estimator(self , instance_count=1 ):
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )
    def save_results_as_csv(self , job_name ):
        TrainingJobAnalytics(job_name ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""")
    def test_glue(self ):
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999)
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy)
assert all(t <= self.results['''eval_loss'''] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''') as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , outfile)
| 114 | 0 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path , checkpoint_path , lora_prefix_unet , lora_prefix_text_encoder , alpha ):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path , torch_dtype=torch.float32 )
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path )
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split(""".""" )[0].split(lora_prefix_text_encoder + """_""" )[-1].split("""_""" )
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(""".""" )[0].split(lora_prefix_unet + """_""" )[-1].split("""_""" )
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name = layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(temp_name ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name = layer_infos.pop(0 )
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )
        # update weight: fold the low-rank update directly into the base layer,
        # i.e. W' = W0 + alpha * (up @ down)
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down )
        # update visited list
        for item in pair_keys:
            visited.append(item )
    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
    )
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
    )
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument(
        '''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
    )
    parser.add_argument(
        '''--lora_prefix_text_encoder''',
        default='''lora_te''',
        type=str,
        help='''The prefix of text encoder weight in safetensors''',
    )
    parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
    parser.add_argument(
        '''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
    )
    parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    args = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
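    # Hypothetical invocation (script name and paths are examples, not fixed values):
    #   python convert_lora_safetensor_to_diffusers.py --base_model_path runwayml/stable-diffusion-v1-5 \
    #       --checkpoint_path ./lora.safetensors --dump_path ./merged --alpha 0.75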
| 720 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
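# Each SORTED_HANDS entry above is a single 5-card hand; each TEST_COMPARE row pairs
# two hands with the expected result of hand.compare_with(other).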
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    # (play >= oppo) + (play > oppo) maps to 0/1/2 = Loss/Tie/Win, since SORTED_HANDS
    # is ordered from weakest to strongest hand.
    expected = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 1_0_0 ):
    return (generate_random_hand() for _ in range(number_of_hands ))
@pytest.mark.parametrize("""hand, expected""" , __UpperCamelCase )
def _a ( __UpperCamelCase , __UpperCamelCase ):
assert PokerHand(__UpperCamelCase )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , __UpperCamelCase )
def _a ( __UpperCamelCase , __UpperCamelCase ):
assert PokerHand(__UpperCamelCase )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , __UpperCamelCase )
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
a_ : Union[str, Any] = PokerHand(__UpperCamelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , __UpperCamelCase )
def _a ( __UpperCamelCase , __UpperCamelCase ):
assert PokerHand(__UpperCamelCase )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , __UpperCamelCase )
def _a ( __UpperCamelCase , __UpperCamelCase ):
assert PokerHand(__UpperCamelCase )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , __UpperCamelCase )
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
assert PokerHand(__UpperCamelCase ).compare_with(PokerHand(__UpperCamelCase ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def _a ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
assert PokerHand(__UpperCamelCase ).compare_with(PokerHand(__UpperCamelCase ) ) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("""2C 4S AS 3D 5C""" )
    expected = True
    expected_card_values = [5, 4, 3, 2, 1_4]
    for _ in range(1_0 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands = os.path.join(script_dir , """poker_hands.txt""" )
    with open(poker_hands ) as file_hand:
        for line in file_hand:
            player_hand = line[:1_4].strip()
            opponent_hand = line[1_5:].strip()
            player, opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 3_7_6
| 478 | 0 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float('nan')
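# `nan` doubles as the sentinel for runs that failed to produce the target metric.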
class Tee:
    def __init__(self , filename ):
        self.stdout = sys.stdout
        self.file = open(filename , '''a''' )
    def __getattr__(self , attr ):
        return getattr(self.stdout , attr )
    def write(self , msg ):
        self.stdout.write(msg )
        # strip tqdm codes
        self.file.write(re.sub(r'''^.*\r''' , '''''' , msg , 0 , re.M ) )
def get_original_command(max_width=80 , full_python_path=False ):
    cmd = []
    # deal with critical env vars
    env_keys = ['''CUDA_VISIBLE_DEVICES''']
    for key in env_keys:
        val = os.environ.get(key , None )
        if val is not None:
            cmd.append(F"""{key}={val}""" )
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split('''/''' )[-1]
    cmd.append(python )
    # now the normal args
    cmd += list(map(shlex.quote , sys.argv ) )
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ''''''
    while len(cmd ) > 0:
        current_line += F"""{cmd.pop(0 )} """
        if len(cmd ) == 0 or len(current_line ) + len(cmd[0] ) + 1 > max_width - 1:
            lines.append(current_line )
            current_line = ''''''
    return "\\\n".join(lines )
def get_base_command(args , output_dir ):
    # unwrap multi-line input
    args.base_cmd = re.sub(r'''[\\\n]+''' , ''' ''' , args.base_cmd )
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub('''--output_dir\s+[^\s]+''' , '''''' , args.base_cmd )
    args.base_cmd += F""" --output_dir {output_dir}"""
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub('''--overwrite_output_dir\s+''' , '''''' , args.base_cmd )
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd )
def process_run_single(id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose ):
    # `if 0:` below is a disabled debug path that fakes metrics instead of launching
    # a real subprocess run; flip it to `if 1:` for a fast dry run.
    if 0:
        import random
        from time import sleep

        sleep(0 )
        return dict(
            {k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , )
    result = subprocess.run(cmd , capture_output=True , text=True )
    if verbose:
        print('''STDOUT''' , result.stdout )
        print('''STDERR''' , result.stderr )
    # save the streams
    prefix = variation.replace(''' ''' , '''-''' )
    with open(Path(output_dir ) / F"""log.{prefix}.stdout.txt""" , '''w''' ) as f:
        f.write(result.stdout )
    with open(Path(output_dir ) / F"""log.{prefix}.stderr.txt""" , '''w''' ) as f:
        f.write(result.stderr )
    if result.returncode != 0:
        if verbose:
            print('''failed''' )
        return {target_metric_key: nan}
    with io.open(F"""{output_dir}/all_results.json""" , '''r''' , encoding='''utf-8''' ) as f:
        metrics = json.load(f )
    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(id , cmd , variation_key , variation , longest_variation_len , target_metric_key , report_metric_keys , repeat_times , output_dir , verbose , ):
    results = []
    metrics = []
    preamble = F"""{id}: {variation:<{longest_variation_len}}"""
    outcome = F"""{preamble}: """
    metric_keys = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(repeat_times ) , desc=preamble , leave=False ):
        single_run_metrics = process_run_single(
            id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result ):
            metrics.append(single_run_metrics )
            results.append(result )
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = F"""\33[2K\r{outcome}"""
    if len(metrics ) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key] , 2 )
        results_str = F"""{outcome} {mean_target}"""
        if len(results ) > 1:
            results_str += F""" {tuple(round(x , 2 ) for x in results )}"""
        print(results_str )
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome )
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device('''cuda''' ) )
return F"""\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"""
def process_results(results , target_metric_key , report_metric_keys , base_variation , output_dir ):
    df = pd.DataFrame(results )
    variation_key = '''variation'''
    diff_key = '''diff_%'''
    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value ):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()
    # create diff column if possible
    if not math.isnan(sentinel_value ):
        df[diff_key] = df.apply(
            lambda r: round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0 , axis='''columns''' , )
    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols , axis='''columns''' )  # reorder cols
    # capitalize
    df = df.rename(str.capitalize , axis='''columns''' )
    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace('''_''' , '''<br>''' ) , axis='''columns''' )
    df_console = df.rename(lambda c: c.replace('''_''' , '''\n''' ) , axis='''columns''' )
    report = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum''']
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False , floatfmt='''.2f''' )]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False , floatfmt='''.2f''' )]
    print('''\n\n'''.join(report ) )
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--base-cmd''' , default=None , type=str , required=True , help='''Base cmd''' , )
    parser.add_argument(
        '''--variations''' , default=None , type=str , nargs='''+''' , required=True , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , )
    parser.add_argument(
        '''--base-variation''' , default=None , type=str , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , )
    parser.add_argument(
        '''--target-metric-key''' , default=None , type=str , required=True , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , )
    parser.add_argument(
        '''--report-metric-keys''' , default='''''' , type=str , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples''' , )
    parser.add_argument(
        '''--repeat-times''' , default=1 , type=int , help='''How many times to re-run each variation - an average will be reported''' , )
    parser.add_argument(
        '''--output_dir''' , default='''output_benchmark''' , type=str , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , )
    parser.add_argument(
        '''--verbose''' , default=False , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , )
    args = parser.parse_args()
    output_dir = args.output_dir
    Path(output_dir ).mkdir(exist_ok=True )
    base_cmd = get_base_command(args , output_dir )
    # split each dimension into its --foo variations
    dims = [list(map(str.strip , re.split(r'''\|''' , x ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip , map(''' '''.join , itertools.product(*dims ) ) ) )
    longest_variation_len = max(len(x ) for x in variations )
    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()
    # capture prints into a log file for convenience
    report_fn = F"""benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt"""
    print(F"""\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt""" )
    print(F"""and this script\'s output is also piped into {report_fn}""" )
    sys.stdout = Tee(report_fn )
    print(F"""\n*** Running {len(variations )} benchmarks:""" )
    print(F"""Base command: {' '.join(base_cmd )}""" )
    variation_key = '''variation'''
    results = []
    for id, variation in enumerate(tqdm(variations , desc='''Total completion: ''' , leave=False ) ):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 , cmd , variation_key , variation , longest_variation_len , args.target_metric_key , report_metric_keys , args.repeat_times , output_dir , args.verbose , ) )
    process_results(results , args.target_metric_key , report_metric_keys , args.base_variation , output_dir )
if __name__ == "__main__":
main() | 217 |
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed (m/s) of a gas molecule at the given
    temperature (K) and molar mass (kg/mol)."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
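A quick unit check on the formula v_rms = sqrt(3RT/M): the function expects molar mass in kg/mol, so nitrogen's 28 g/mol should be divided by 1000 first. A hedged sketch reusing the function above:

# nitrogen at 300 K, with molar mass converted from g/mol to kg/mol
nitrogen_kg_per_mol = 28 / 1000  # 0.028 kg/mol
print(rms_speed_of_molecule(300, nitrogen_kg_per_mol))  # ~517 m/s, the textbook order of magnitude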
| 344 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
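The `_LazyModule` installed above defers the heavy `modeling_mega` import until an attribute is first accessed. A minimal sketch of the same idea with a hypothetical `_DemoLazyModule` (not the transformers implementation):

import importlib
import types


class _DemoLazyModule(types.ModuleType):
    # Hypothetical toy version: resolve submodules only on first attribute access.
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        module_name = self._symbol_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{module_name}", self.__name__)
        return getattr(module, attr)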
| 548 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
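Invocation is a single CLI call. A hedged example (the script filename and output path are illustrative; the checkpoint is the script's own default):

# python convert_unclip_txt2img_to_image_variation.py \
#     --txt2img_unclip kakaobrain/karlo-v1-alpha \
#     --dump_path ./karlo-image-variation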
| 548 | 1 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
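The reader/writer pair round-trips a `Dataset` through a parquet file. A minimal sketch using the same APIs exercised by the tests above (the file name is illustrative):

from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [0, 1, 2, 3]})
ParquetDatasetWriter(ds, "demo.parquet").write()        # returns the number of bytes written
reloaded = ParquetDatasetReader("demo.parquet").read()  # a Dataset with the same rows
assert reloaded.num_rows == ds.num_rows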
| 418 | import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple

import numpy as np

from . import residue_constants


FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms: List[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable

            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
| 670 | 0 |
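A short usage sketch for the protein utilities above: a single-residue `Protein` with only its N atom resolved produces a "PARENT N/A" header, one ATOM record, then TER and END lines. Shapes follow the dataclass comments; the values are toy placeholders:

import numpy as np

num_atoms = residue_constants.atom_type_num
prot = Protein(
    atom_positions=np.zeros((1, num_atoms, 3)),
    aatype=np.array([0]),
    atom_mask=np.eye(1, num_atoms),  # only atom 0 ("N") is marked present
    residue_index=np.array([1]),
    b_factors=np.zeros((1, num_atoms)),
)
pdb_str = to_pdb(prot)
assert pdb_str.rstrip().endswith("END")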
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
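A minimal usage sketch for the config above, relying only on the transformers machinery already imported:

# Default construction falls back to a ResNet backbone; to_dict serializes both.
config = UperNetConfig()
d = config.to_dict()
assert d["model_type"] == "upernet"
assert d["backbone_config"]["model_type"] == "resnet"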
| 652 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # Windows of len(qts) in ks, and whether they match
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # atention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
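Applied to a parameter pytree, `set_partitions` flattens the tree, matches each key path against the rules, and fails loudly on any unmatched leaf. A hedged toy example with key names chosen to match the rules above:

params = {"transformer": {"wte": {"embedding": 0}, "ln_f": {"bias": 0, "scale": 0}}}
specs = set_partitions(params)
# specs["transformer"]["wte"]["embedding"] == PartitionSpec("mp", None)
# the layer-norm leaves fall through to None, i.e. fully replicated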
| 652 | 1 |
from math import factorial
def combinations(n: int, k: int) -> int:
    """Return n choose k, the number of k-element subsets of an n-element set."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
F'fifty-two card deck is: {combinations(5_2, 5)}\n',
)
print(
'If a class of 40 students must be arranged into groups of',
F'4 for group projects, there are {combinations(4_0, 4)} ways',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
F'are {combinations(1_0, 3)} ways that first, second and',
'third place can be awarded.',
)
| 15 |
"""simple docstring"""
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n" + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
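Two spot checks on the conversion factors; every conversion pivots through cubic meters (`from_` converts into m^3, `to` converts out of it):

print(volume_conversion(4, "cubicmeter", "litre"))  # 4 * 1 * 1000 = 4000.0
print(volume_conversion(1, "litre", "gallon"))      # 1 * 0.001 * 264.172 ~= 0.264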
| 83 | 0 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    """Toy tool used by the interpreter tests below."""
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
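Outside the test harness, `evaluate` interprets a restricted Python subset against an explicit tool set and a mutable state dict. A minimal sketch mirroring the call pattern used in the tests:

state = {"x": 4}
result = evaluate("y = add_two(x)", {"add_two": add_two}, state=state)
print(result)  # 6
print(state)   # {'x': 4, 'y': 6}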
| 685 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Loads a list of ``InputExample``s into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0

        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
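Putting the pieces together, a hedged sketch of building the torch-side dataset; the data directory and tokenizer name are illustrative, and the HANS text files must already be present on disk:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
train_dataset = HansDataset(
    data_dir="./hans",  # must contain heuristics_train_set.txt
    tokenizer=tokenizer,
    task="hans",
    max_seq_length=128,
)
print(len(train_dataset), train_dataset.get_labels())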
| 685 | 1 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not have been able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 137 |
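`ContextManagers` above stacks a list of context managers behind a single `with`, entering left to right and exiting in reverse. A small sketch grounded in the output asserted by the tests:

with ContextManagers([context_fr(), context_en()]):
    print("hello")
# prints, one per line: Bonjour! / Welcome! / hello / Bye! / Au revoir!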
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 137 | 1 |
def solution(limit: int = 1000000) -> int:
    """Return the sum of Euler's totient phi(n) for 2 <= n <= limit, i.e. the
    number of reduced proper fractions with denominator at most `limit`."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f'''{solution() = }''')
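A quick sanity check on a small limit: phi(2) + ... + phi(8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21 reduced proper fractions with denominator at most 8.

print(solution(8))  # 21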
| 706 |
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-based position of the highest set bit of `number`.

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(1)
    1
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | 0 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
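In practice this formatter is selected through `Dataset.with_format`. A hedged sketch of the round trip:

from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
row = ds[0]      # {"x": jax.Array([1., 2.], dtype=float32)}
batch = ds[:2]   # equal-shaped rows are consolidated via jnp.stack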
| 208 | import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( _UpperCamelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = ConditionalDetrImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = ConditionalDetrImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Any ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A__ , "image_mean" ) )
self.assertTrue(hasattr(A__ , "image_std" ) )
self.assertTrue(hasattr(A__ , "do_normalize" ) )
self.assertTrue(hasattr(A__ , "do_resize" ) )
self.assertTrue(hasattr(A__ , "size" ) )
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , A__ )
snake_case_ : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A__ )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , A__ )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , Image.Image )
# Test not batched input
snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ ,snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
snake_case_ : int = image_processing(A__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self : int ) -> Any:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , np.ndarray )
# Test not batched input
snake_case_ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : List[str] = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : Optional[int] = image_processing(A__ , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : Dict = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self : Tuple ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , torch.Tensor )
# Test not batched input
snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : Any = image_processing(A__ , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : int = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )
        target = {"image_id": 3_97_69, "annotations": target}
        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
        encoding = image_processing(images=image , annotations=target , return_tensors="pt" )
# verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1E-4 ) )
        # verify area
        expected_area = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_boxes_slice , atol=1E-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
@slow
def UpperCAmelCase__ ( self : int ) -> str:
'''simple docstring'''
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )
        target = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic" )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors="pt" )
# verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1E-4 ) )
        # verify area
        expected_area = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_boxes_slice , atol=1E-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 82_28_73
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
| 666 | 0 |
'''simple docstring'''
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
def UpperCamelCase( ):
    env_level_str = os.getenv('DATASETS_VERBOSITY' , None )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
F"""has to be one of: { ", ".join(log_levels.keys() ) }""" )
return _default_log_level
def UpperCamelCase( ):
return __name__.split('.' )[0]
def UpperCamelCase( ):
return logging.getLogger(_get_library_name() )
def UpperCamelCase( ):
    library_root_logger = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def UpperCamelCase( ):
    library_root_logger = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def get_logger(name: Optional[str] = None ):
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name )
def UpperCamelCase( ):
return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int ):
    _get_library_root_logger().setLevel(verbosity )
def set_verbosity_info():
    return set_verbosity(INFO )
def set_verbosity_warning():
    return set_verbosity(WARNING )
def set_verbosity_debug():
    return set_verbosity(DEBUG )
def set_verbosity_error():
    return set_verbosity(ERROR )
def UpperCamelCase( ):
UpperCAmelCase : Dict = False
def UpperCamelCase( ):
UpperCAmelCase : Optional[Any] = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
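# No-op tqdm stand-in used while progress bars are disabled; it accepts and ignores all calls.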
class EmptyTqdm:
    '''simple docstring'''
    def __init__( self , *args , **kwargs ) -> None: # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
def __iter__( self : str ) -> Optional[Any]:
return iter(self._iterator )
def __getattr__( self : Any , lowercase_ : Union[str, Any] ) -> int:
        def empty_fn(*args , **kwargs ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Tuple ) -> str:
return self
def __exit__( self : Optional[int] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : int ) -> List[str]:
return
_tqdm_active = True
class _tqdm_cls:
    '''simple docstring'''
    def __call__( self , *args , disable=False , **kwargs ):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock( self , *args , **kwargs ):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock( self ):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled():
    global _tqdm_active
    return bool(_tqdm_active )
def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True
def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
| 710 |
'''simple docstring'''
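# Digit sum of 2**power; with the default power=1000 this is Project Euler problem 16.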
def solution(power = 10_00 ):
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 695 | 0 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
a_ = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
a_ = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
a_ = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
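# Per-class pixel areas are accumulated with histograms over class indices:
# intersection, prediction, and ground-truth counts give per-class IoU = intersect / union.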
def intersect_and_union(pred_label ,label ,num_labels ,ignore_index ,label_map = None ,reduce_labels = False ,) -> Optional[int]:
    '''simple docstring'''
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label ,ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect ,bins=num_labels ,range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label ,bins=num_labels ,range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label ,bins=num_labels ,range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results ,gt_seg_maps ,num_labels ,ignore_index ,label_map = None ,reduce_labels = False ,) -> Optional[Any]:
    '''simple docstring'''
    total_area_intersect = np.zeros((num_labels,) ,dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) ,dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) ,dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) ,dtype=np.float64 )
    for result, gt_seg_map in zip(results ,gt_seg_maps ):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result ,gt_seg_map ,num_labels ,ignore_index ,label_map ,reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results ,gt_seg_maps ,num_labels ,ignore_index ,nan_to_num = None ,label_map = None ,reduce_labels = False ,) -> Optional[int]:
    '''simple docstring'''
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results ,gt_seg_maps ,num_labels ,ignore_index ,label_map ,reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou )
    metrics["mean_accuracy"] = np.nanmean(acc )
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value ,nan=nan_to_num ) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
    def _info( self ) ->str:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
"references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
}) , reference_urls=[
"https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
] , )
    def _compute( self , predictions , references , num_labels: int , ignore_index: bool , nan_to_num: Optional[int] = None , label_map: Optional[Dict[int, int]] = None , reduce_labels: bool = False , ) ->str:
'''simple docstring'''
        iou_result = mean_iou(
            results=predictions , gt_seg_maps=references , num_labels=num_labels , ignore_index=ignore_index , nan_to_num=nan_to_num , label_map=label_map , reduce_labels=reduce_labels , )
return iou_result
| 685 |
'''simple docstring'''
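# Two list-backed priority queues: a fixed three-level queue (priority 0 is served first)
# and an element queue that always dequeues the smallest stored value.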
class OverFlowError(Exception ):
    pass
class UnderFlowError(Exception ):
    pass
class FixedPriorityQueue:
    def __init__( self ) ->None:
        '''simple docstring'''
        self.queues = [
[],
[],
[],
]
    def enqueue( self , priority: int , data: int ) ->None:
'''simple docstring'''
try:
if len(self.queues[priority]) >= 1_00:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2")
    def dequeue( self ) ->int:
'''simple docstring'''
for queue in self.queues:
if queue:
return queue.pop(0)
raise UnderFlowError("All queues are empty")
def __str__( self: Dict) ->str:
'''simple docstring'''
return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    def __init__( self ) ->None:
        '''simple docstring'''
        self.queue = []
    def enqueue( self , data: int ) ->None:
'''simple docstring'''
if len(self.queue) == 1_00:
raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)
    def dequeue( self ) ->int:
'''simple docstring'''
if not self.queue:
raise UnderFlowError("The queue is empty")
else:
            data = min(self.queue)
            self.queue.remove(data)
return data
def __str__( self: Optional[int]) ->str:
'''simple docstring'''
return str(self.queue)
def fixed_priority_queue() -> None:
    '''simple docstring'''
    fpq = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
    '''simple docstring'''
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 1 |
'''simple docstring'''
# using dfs for finding eulerian path traversal
def dfs(u , graph , visited_edge , path=None ):
    '''simple docstring'''
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v , graph , visited_edge , path )
    return path
def check_circuit_or_path(graph , max_node ):
'''simple docstring'''
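    # Return codes: 1 = Eulerian circuit (all degrees even),
    # 2 = Eulerian path (exactly two odd-degree vertices), 3 = neither.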
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
            odd_node = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def check_euler(graph , max_node ):
    '''simple docstring'''
    visited_edge = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
    check, odd_node = check_circuit_or_path(graph , max_node )
if check == 3:
print("""graph is not Eulerian""" )
print("""no path""" )
return
    start_node = 1
if check == 2:
        start_node = odd_node
print("""graph has a Euler path""" )
if check == 1:
print("""graph has a Euler cycle""" )
    path = dfs(start_node , graph , visited_edge )
    print(path )
def main():
    '''simple docstring'''
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1 , max_node )
    check_euler(g2 , max_node )
    check_euler(g3 , max_node )
    check_euler(g4 , max_node )
    check_euler(g5 , max_node )
if __name__ == "__main__":
main()
| 398 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
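# Round-trip tests for ChineseCLIPProcessor: save/load and tokenizer + image-processor composition.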
@require_vision
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : int ) -> str:
lowerCAmelCase_ : List[Any] = tempfile.mkdtemp()
lowerCAmelCase_ : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowerCAmelCase_ : Any = {
"""do_resize""": True,
"""size""": {"""height""": 2_24, """width""": 2_24},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"""do_convert_rgb""": True,
}
        self.image_processor_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(lowerCamelCase , lowerCamelCase )
def __lowercase ( self : List[str] , **lowerCamelCase : Optional[int] ) -> List[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def __lowercase ( self : Tuple , **lowerCamelCase : str ) -> Any:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase )
def __lowercase ( self : Union[str, Any] , **lowerCamelCase : Tuple ) -> str:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase )
def __lowercase ( self : Union[str, Any] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self : Tuple ) -> int:
lowerCAmelCase_ : Optional[Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
lowerCAmelCase_ : Union[str, Any] = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self : Optional[Any] ) -> Union[str, Any]:
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase )
def __lowercase ( self : Union[str, Any] ) -> Dict:
lowerCAmelCase_ : Optional[int] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ : Union[str, Any] = self.get_tokenizer(cls_token="""(CLS)""" , sep_token="""(SEP)""" )
lowerCAmelCase_ : Union[str, Any] = self.get_image_processor(do_normalize=lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="""(CLS)""" , sep_token="""(SEP)""" , do_normalize=lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def __lowercase ( self : Optional[Any] ) -> Dict:
lowerCAmelCase_ : Tuple = self.get_image_processor()
lowerCAmelCase_ : Optional[Any] = self.get_tokenizer()
lowerCAmelCase_ : List[Any] = ChineseCLIPProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
lowerCAmelCase_ : Optional[int] = self.prepare_image_inputs()
lowerCAmelCase_ : Optional[int] = image_processor(lowerCamelCase , return_tensors="""np""" )
lowerCAmelCase_ : List[Any] = processor(images=lowerCamelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowercase ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase_ : Dict = self.get_image_processor()
lowerCAmelCase_ : int = self.get_tokenizer()
lowerCAmelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
lowerCAmelCase_ : int = """Alexandra,T-shirt的价格是15便士。"""
lowerCAmelCase_ : Optional[Any] = processor(text=lowerCamelCase )
lowerCAmelCase_ : List[str] = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self : Union[str, Any] ) -> List[str]:
lowerCAmelCase_ : Dict = self.get_image_processor()
lowerCAmelCase_ : List[Any] = self.get_tokenizer()
lowerCAmelCase_ : Dict = ChineseCLIPProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = """Alexandra,T-shirt的价格是15便士。"""
lowerCAmelCase_ : int = self.prepare_image_inputs()
lowerCAmelCase_ : Union[str, Any] = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def __lowercase ( self : int ) -> Optional[Any]:
lowerCAmelCase_ : Optional[Any] = self.get_image_processor()
lowerCAmelCase_ : int = self.get_tokenizer()
lowerCAmelCase_ : Dict = ChineseCLIPProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ : List[Any] = processor.batch_decode(lowerCamelCase )
lowerCAmelCase_ : str = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def __lowercase ( self : str ) -> List[Any]:
lowerCAmelCase_ : Tuple = self.get_image_processor()
lowerCAmelCase_ : Tuple = self.get_tokenizer()
lowerCAmelCase_ : int = ChineseCLIPProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
lowerCAmelCase_ : int = """Alexandra,T-shirt的价格是15便士。"""
lowerCAmelCase_ : str = self.prepare_image_inputs()
lowerCAmelCase_ : Tuple = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 398 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
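# CLIP-style preprocessing pipeline: convert to RGB, resize, center crop, rescale, normalize.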
class CLIPImageProcessor( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
def __init__( self : Dict , __lowercase : List[Any] = True , __lowercase : Optional[int] = None , __lowercase : List[Any] = PILImageResampling.BICUBIC , __lowercase : str = True , __lowercase : Union[str, Any] = None , __lowercase : Dict = True , __lowercase : Any = 1 / 255 , __lowercase : List[Any] = True , __lowercase : List[str] = None , __lowercase : List[Any] = None , __lowercase : Any = True , **__lowercase : int , ):
"""simple docstring"""
super().__init__(**a__ )
__lowercase =size if size is not None else {"""shortest_edge""": 224}
__lowercase =get_size_dict(a__ , default_to_square=a__ )
__lowercase =crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__lowercase =get_size_dict(a__ , default_to_square=a__ , param_name='crop_size' )
__lowercase =do_resize
__lowercase =size
__lowercase =resample
__lowercase =do_center_crop
__lowercase =crop_size
__lowercase =do_rescale
__lowercase =rescale_factor
__lowercase =do_normalize
__lowercase =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowercase =image_std if image_std is not None else OPENAI_CLIP_STD
__lowercase =do_convert_rgb
def snake_case ( self : Any , __lowercase : List[str] , __lowercase : Tuple , __lowercase : List[Any] = PILImageResampling.BICUBIC , __lowercase : Optional[Any] = None , **__lowercase : List[Any] , ):
"""simple docstring"""
__lowercase =get_size_dict(a__ , default_to_square=a__ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
__lowercase =get_resize_output_image_size(a__ , size=size['shortest_edge'] , default_to_square=a__ )
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
def snake_case ( self : List[Any] , __lowercase : Any , __lowercase : Dict , __lowercase : Any = None , **__lowercase : List[Any] , ):
"""simple docstring"""
__lowercase =get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(a__ , size=(size['height'], size['width']) , data_format=a__ , **a__ )
def snake_case ( self : Any , __lowercase : List[Any] , __lowercase : Tuple , __lowercase : Union[str, Any] = None , **__lowercase : Optional[Any] , ):
"""simple docstring"""
return rescale(a__ , scale=a__ , data_format=a__ , **a__ )
def snake_case ( self : List[Any] , __lowercase : List[str] , __lowercase : Dict , __lowercase : List[Any] , __lowercase : List[Any] = None , **__lowercase : List[Any] , ):
"""simple docstring"""
return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ )
def snake_case ( self : Tuple , __lowercase : Optional[int] , __lowercase : List[Any] = None , __lowercase : Union[str, Any] = None , __lowercase : List[Any] = None , __lowercase : Optional[int] = None , __lowercase : Union[str, Any] = None , __lowercase : List[str] = None , __lowercase : Union[str, Any] = None , __lowercase : Optional[Any] = None , __lowercase : Any = None , __lowercase : Union[str, Any] = None , __lowercase : List[Any] = None , __lowercase : Any = None , __lowercase : Union[str, Any] = ChannelDimension.FIRST , **__lowercase : List[str] , ):
"""simple docstring"""
__lowercase =do_resize if do_resize is not None else self.do_resize
__lowercase =size if size is not None else self.size
__lowercase =get_size_dict(a__ , param_name='size' , default_to_square=a__ )
__lowercase =resample if resample is not None else self.resample
__lowercase =do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase =crop_size if crop_size is not None else self.crop_size
__lowercase =get_size_dict(a__ , param_name='crop_size' , default_to_square=a__ )
__lowercase =do_rescale if do_rescale is not None else self.do_rescale
__lowercase =rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase =do_normalize if do_normalize is not None else self.do_normalize
__lowercase =image_mean if image_mean is not None else self.image_mean
__lowercase =image_std if image_std is not None else self.image_std
__lowercase =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase =make_list_of_images(a__ )
if not valid_images(a__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase =[convert_to_rgb(a__ ) for image in images]
# All transformations expect numpy arrays.
__lowercase =[to_numpy_array(a__ ) for image in images]
if do_resize:
__lowercase =[self.resize(image=a__ , size=a__ , resample=a__ ) for image in images]
if do_center_crop:
__lowercase =[self.center_crop(image=a__ , size=a__ ) for image in images]
if do_rescale:
__lowercase =[self.rescale(image=a__ , scale=a__ ) for image in images]
if do_normalize:
__lowercase =[self.normalize(image=a__ , mean=a__ , std=a__ ) for image in images]
__lowercase =[to_channel_dimension_format(a__ , a__ ) for image in images]
__lowercase ={"""pixel_values""": images}
return BatchFeature(data=a__ , tensor_type=a__ )
| 119 | """simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __A ( unittest.TestCase ):
@property
    def dummy_uncond_unet( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Dict = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
    def dummy_vq_model( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(a__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.dummy_uncond_unet
_lowerCAmelCase : int = DDIMScheduler()
_lowerCAmelCase : Any = self.dummy_vq_model
_lowerCAmelCase : List[str] = LDMPipeline(unet=a__ , vqvae=a__ , scheduler=a__ )
ldm.to(a__ )
ldm.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : List[Any] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[Any] = ldm(generator=a__ , num_inference_steps=2 , output_type="""numpy""" ).images
_lowerCAmelCase : Any = torch.manual_seed(0 )
_lowerCAmelCase : Tuple = ldm(generator=a__ , num_inference_steps=2 , output_type="""numpy""" , return_dict=a__ )[0]
_lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
_lowerCAmelCase : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : List[Any] = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
_lowerCAmelCase : List[str] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : Any = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(a__ )
ldm.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : int = torch.manual_seed(0 )
_lowerCAmelCase : List[str] = ldm(generator=a__ , num_inference_steps=5 , output_type="""numpy""" ).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowerCAmelCase : Union[str, Any] = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
_lowerCAmelCase : str = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 213 | 0 |
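# Sums the natural numbers below n that are multiples of 3 or 5 (a Project Euler 1 variant).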
def solution(n = 1_000 ):
    a = 3
    result = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 15 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(f'''{solution() = }''')
| 209 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCAmelCase ( __A ):
"""simple docstring"""
_lowerCamelCase = ["""image_processor""", """tokenizer"""]
_lowerCamelCase = """LayoutLMv2ImageProcessor"""
_lowerCamelCase = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __A , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text = None , text_pair = None , boxes = None , word_labels = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes """
"""if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("""You cannot return overflowing tokens without returning the offsets mapping.""" )
# first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text] # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["""words"""]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
# add pixel values
        images = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["""overflow_to_sample_mapping"""] )
        encoded_inputs["""image"""] = images
return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__a = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__A ) != len(__A ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
f''' {len(__A )} and {len(__A )}''' )
return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
@property
def snake_case_ ( self ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def snake_case_ ( self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __A , )
return self.image_processor_class
@property
def snake_case_ ( self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __A , )
return self.image_processor
| 209 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
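# Lazy import structure: submodules are only imported when an attribute is first accessed.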
_import_structure = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 618 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowercase( lowercase__ ):
'''simple docstring'''
__a : List[Any] = ['image_processor', 'tokenizer']
__a : List[Any] = 'BlipImageProcessor'
__a : str = ('BertTokenizer', 'BertTokenizerFast')
    def __init__( self , image_processor , tokenizer ):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
if text_encoding is not None:
encoding_image_processor.update(__a )
return encoding_image_processor
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 594 | 0 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader( AbstractDatasetReader ):
'''simple docstring'''
    def __init__( self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , **kwargs , )
def __UpperCamelCase ( self ):
'''simple docstring'''
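        # Streaming returns an IterableDataset without downloading and preparing the data first.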
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
return dataset
| 516 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
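# The MAPPING above pairs fairseq parameter names with their Hugging Face counterparts;
# "*" is a placeholder that is replaced with the encoder layer index during conversion.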
_lowerCamelCase : List[str] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ) ->str:
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
__A =value
elif weight_type == "weight_g":
__A =value
elif weight_type == "weight_v":
__A =value
elif weight_type == "bias":
__A =value
else:
__A =value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights_wavaveca(fairseq_model , hf_model ) ->List[str]:
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
for name, value in fairseq_dict.items():
        is_used = False
if "conv_layers" in name:
load_conv_layer(
__A , __A , __A , __A , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
elif name.split('''.''' )[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
continue
if not is_used:
unused_weights.append(__A )
logger.warning(F'''Unused weights: {unused_weights}''' )
return proj_weight
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ) ->Optional[Any]:
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__A )
def make_linear_from_emb(emb ) ->List[Any]:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path ) ->Optional[int]:
    with open(dict_path , '''r''' , encoding='''utf-8''' ) as f:
        lines = f.readlines()
        words = [line.split(''' ''' )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
        '''<s>''': 0,
        '''<pad>''': 1,
        '''</s>''': 2,
        '''<unk>''': 3,
    }
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
    return vocab_dict
@torch.no_grad()
def A__ ( __A : List[Any] , __A : Optional[Any] , __A : Tuple , __A : int , __A : str , __A : str , __A : Dict , ) ->Tuple:
__A =WavaVecaConfig.from_pretrained(__A )
__A =SpeechaTextaConfig.from_pretrained(
__A , vocab_size=__A , decoder_layers=__A , do_stable_layer_norm=__A )
__A =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__A , return_attention_mask=__A , )
__A , __A , __A =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
__A =model[0].eval()
# set weights for wav2vec2 encoder
__A =WavaVecaModel(__A )
__A =recursively_load_weights_wavaveca(model.encoder , __A )
__A =SpeechaTextaForCausalLM(__A )
__A , __A =hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__A )
# set output linear layer
unexpected_keys.remove('''embed_out''' )
__A =nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
__A =SpeechEncoderDecoderModel(encoder=__A , decoder=__A )
__A =False
# add projection layer
__A =nn.Parameter(projection_layer.weight )
__A =nn.Parameter(projection_layer.bias )
__A =create_vocab_dict(__A )
with open(os.path.join(__A , '''vocab.json''' ) , '''w''' ) as fp:
json.dump(__A , __A )
__A =SpeechaTextaTokenizer(os.path.join(__A , '''vocab.json''' ) )
tokenizer.save_pretrained(__A )
__A =hf_wavavec.config.to_dict()
__A =tokenizer.pad_token_id
__A =tokenizer.bos_token_id
__A =tokenizer.eos_token_id
__A ='''speech_to_text_2'''
__A ='''wav2vec2'''
__A =SpeechEncoderDecoderConfig.from_dict(__A )
hf_wavavec.save_pretrained(__A )
feature_extractor.save_pretrained(__A )
if __name__ == "__main__":
_lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=1_0224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
_lowerCamelCase : Tuple = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 516 | 1 |
from math import pi
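# Arc length of a circular sector: 2 * pi * radius * (angle / 360).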
def arc_length(angle , radius ):
"""simple docstring"""
return 2 * pi * radius * (angle / 3_6_0)
if __name__ == "__main__":
print(arc_length(90, 10))
| 268 | _SCREAMING_SNAKE_CASE = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
_SCREAMING_SNAKE_CASE = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
smart50_timesteps = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
smart100_timesteps = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
smart185_timesteps = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
super27_timesteps = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
super40_timesteps = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
super100_timesteps = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
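# The eight lists above are distinct pre-computed denoising schedules ("fast"/"smart"/"super"
# variants of 27-185 steps). Usage sketch, assuming a diffusers-style pipeline that accepts
# explicit timesteps: `pipe(prompt, timesteps=fast27_timesteps)` (call signature illustrative).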
| 537 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "roformer"

    def __init__(
        self,
        vocab_size=5_0000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
| 207 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
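# `_import_structure` maps submodule names to their exported symbols; `_LazyModule` (bottom of the
# file) uses it to defer the heavy framework imports until an attribute is actually accessed.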
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 207 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = 'pegasus'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=5_0_2_6_5,
        max_position_embeddings=1_0_2_4,
        encoder_layers=1_2,
        encoder_ffn_dim=4_0_9_6,
        encoder_attention_heads=1_6,
        decoder_layers=1_2,
        decoder_ffn_dim=4_0_9_6,
        decoder_attention_heads=1_6,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1_0_2_4,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )

    @property
    def num_attention_heads(self):
        '''simple docstring'''
        return self.encoder_attention_heads

    @property
    def hidden_size(self):
        '''simple docstring'''
        return self.d_model
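# Usage sketch: `cfg = PegasusConfig()` exposes `cfg.hidden_size` as an alias of `cfg.d_model` and
# `cfg.num_attention_heads` as an alias of `cfg.encoder_attention_heads` via the properties above.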
| 620 | import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=9_9,
        hidden_size=3_6,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=1_6,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1_0_0_0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True
    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=3_7)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict['labels'] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict['labels'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, )
        return inputs_dict
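    # _prepare_for_class mirrors the common test harness: it tiles the inputs across choices for
    # multiple-choice heads and synthesizes dummy labels of the right shape for each task head.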
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='pt').pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device), )

        # verify the logits
        expected_shape = torch.Size((1, 1_9_9, 7_6_8))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) | 544 | 0 |
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """simple docstring"""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main():
    """simple docstring"""
    print(sum_of_series(1, 1, 1_0))
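# Worked check: first_term=1, common_diff=1, num_of_terms=10 gives (10 / 2) * (2 + 9) = 55.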
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(url + location).content, 'html.parser')
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('div', attrs={'data-tn-component': 'organicJob'}):
        job_title = job.find('a', attrs={'data-tn-element': 'jobTitle'}).text.strip()
        company_name = job.find('span', {'class': 'company'}).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 8 | 0 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
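# Extraction format is inferred from file "magic numbers": each extractor below declares the
# leading bytes it recognizes, and Extractor.infer_extractor_format probes the header against them.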
class ExtractManager:
    """simple docstring"""

    def __init__(self, cache_dir=None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path):
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path, force_extract):
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path, force_extract=False):
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path


class BaseExtractor(ABC):
    """simple docstring"""

    @classmethod
    @abstractmethod
    def is_extractable(cls, path, **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path, output_path):
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    """simple docstring"""

    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path, magic_number_length):
        with open(path, 'rb') as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path, magic_number=b""):
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    """simple docstring"""

    @classmethod
    def is_extractable(cls, path, **kwargs):
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path, base) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''')
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''')
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''')
            else:
                yield finfo

    @staticmethod
    def extract(input_path, output_path):
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    """simple docstring"""

    magic_numbers = [B"""\x1F\x8B"""]

    @staticmethod
    def extract(input_path, output_path):
        with gzip.open(input_path, 'rb') as gzip_file:
            with open(output_path, 'wb') as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    """simple docstring"""

    magic_numbers = [
        B"""PK\x03\x04""",
        B"""PK\x05\x06""",  # empty archive
        B"""PK\x07\x08""",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path, magic_number=b""):
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, 'rb') as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path, output_path):
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, 'r') as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    """simple docstring"""

    magic_numbers = [B"""\xFD\x37\x7A\x58\x5A\x00"""]

    @staticmethod
    def extract(input_path, output_path):
        with lzma.open(input_path) as compressed_file:
            with open(output_path, 'wb') as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    """simple docstring"""

    magic_numbers = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""]  # RAR_ID # RAR5_ID

    @staticmethod
    def extract(input_path, output_path):
        if not config.RARFILE_AVAILABLE:
            raise ImportError('Please pip install rarfile')
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    """simple docstring"""

    magic_numbers = [B"""\x28\xb5\x2F\xFD"""]

    @staticmethod
    def extract(input_path, output_path):
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError('Please pip install zstandard')
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, 'rb') as ifh, open(output_path, 'wb') as ofh:
            dctx.copy_stream(ifh, ofh)


class BzipaExtractor(MagicNumberBaseExtractor):
    """simple docstring"""

    magic_numbers = [B"""\x42\x5A\x68"""]

    @staticmethod
    def extract(input_path, output_path):
        with bz2.open(input_path, 'rb') as compressed_file:
            with open(output_path, 'wb') as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    """simple docstring"""

    magic_numbers = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]

    @staticmethod
    def extract(input_path, output_path):
        if not config.PY7ZR_AVAILABLE:
            raise ImportError('Please pip install py7zr')
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, 'r') as archive:
            archive.extractall(output_path)


class LzaExtractor(MagicNumberBaseExtractor):
    """simple docstring"""

    magic_numbers = [B"""\x04\x22\x4D\x18"""]

    @staticmethod
    def extract(input_path, output_path):
        if not config.LZ4_AVAILABLE:
            raise ImportError('Please pip install lz4')
        import lz4.frame

        with lz4.frame.open(input_path, 'rb') as compressed_file:
            with open(output_path, 'wb') as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    """simple docstring"""

    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": BzipaExtractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": LzaExtractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers )

    @staticmethod
    def _read_magic_number(path, magic_number_length):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path, return_extractor=False):
        warnings.warn(
            'Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
            'Use \'infer_extractor_format\' instead.', category=FutureWarning, )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(cls, input_path, output_path, extractor_format=None, extractor="deprecated", ):
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix('.lock'))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        'Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
                        'Use \'extractor_format\' instead.', category=FutureWarning, )
                    extractor = extractor if extractor != 'deprecated' else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    'Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '
                    'exception in 3.0.0.', category=FutureWarning, )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path) | 57 | __version__ = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 403 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 356 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
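# Illustrative mapping produced by create_rename_keys (exact keys depend on the checkpoint):
#   "network.0.1.dwconv.weight" -> "swiftformer.encoder.network.0.blocks.1.depth_wise_conv.weight"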
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_0_0_0
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [4_8, 5_6, 1_1_2, 2_2_0]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [4_8, 6_4, 1_6_8, 2_2_4]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 1_0, 5]
        config.embed_dims = [4_8, 9_6, 1_9_2, 3_8_4]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 1_2, 6]
        config.embed_dims = [6_4, 1_2_8, 3_2_0, 5_1_2]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    expected_output = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1_0_0_0])
    assert torch.allclose(hf_logits[0, 0:5], expected_output, atol=1E-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''')
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 356 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMTaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""")
        tokenizer = AutoTokenizer.from_pretrained("""google/mt5-small""")

        input_ids = tokenizer("""Hello there""", return_tensors="""tf""").input_ids
        labels = tokenizer("""Hi I am""", return_tensors="""tf""").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.22_8168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2E-4)
| 619 |
"""simple docstring"""
from __future__ import annotations
import math
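# Max segment tree with lazy propagation: range-assignment updates and range-max queries, both
# O(log n). Nodes are 1-indexed, so the children of node i live at 2*i and 2*i + 1.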
class SegmentTree:
    '''simple docstring'''

    def __init__(self, size):
        """simple docstring"""
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx):
        """simple docstring"""
        return idx * 2

    def right(self, idx):
        """simple docstring"""
        return idx * 2 + 1

    def build(self, idx, left_element, right_element, a):
        """simple docstring"""
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx, left_element, right_element, a, b, val):
        """simple docstring"""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx, left_element, right_element, a, b):
        """simple docstring"""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        """simple docstring"""
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 1_11)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 2_35)
print(segt)
| 182 | 0 |
'''simple docstring'''
from manim import *
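# Scene sketch: draws CPU/GPU/model memory blocks, then shows checkpoint shards (blue) being
# placed into CPU slots next to the empty model (yellow). Render with e.g. `manim -ql <file>`
# (command illustrative).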
# The scene name and the direction constants passed to `arrange`/`next_to` below are assumptions;
# they follow the usual layout of this kind of memory-diagram animation and are not confirmed by
# the visible source.
class CheckpointLoadingScene(Scene):
    """simple docstring"""

    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.2_5, width=0.2_5)  # name assumed; unused in the visible part of the scene
        fill = Rectangle(height=0.4_6, width=0.4_6).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text('''CPU''', font_size=2_4)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text('''GPU''', font_size=2_4)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text('''Model''', font_size=2_4)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []  # names assumed; only model_cpu_arr is filled below
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.4_6 / 4, width=0.4_6 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.0_2, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)
        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        ckpt_base = [mem.copy() for i in range(6)]
        ckpt_rect = VGroup(*ckpt_base).arrange(RIGHT, buff=0)
        ckpt_text = Text('''Loaded Checkpoint''', font_size=2_4)
        checkpoint = Group(ckpt_rect, ckpt_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []
        for i, rect in enumerate(ckpt_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""", font_size=1_8, )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""", font_size=1_8, )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""", font_size=2_4, )
step_a.move_to([2, 2, 0] )
_A : Optional[int] = [meta_mem.copy() for i in range(6 )]
_A : Optional[int] = [meta_mem.copy() for i in range(6 )]
_A : str = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
_A : Optional[Any] = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
_A : Optional[int] = VGroup(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
_A : Any = Text('''Disk''' , font_size=2_4 )
_A : str = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(UpperCAmelCase__ , run_time=3 ) , Write(UpperCAmelCase__ , run_time=1 ) , Create(UpperCAmelCase__ , run_time=1 ) )
_A : Union[str, Any] = []
for i, rect in enumerate(UpperCAmelCase__ ):
_A : Any = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(UpperCAmelCase__ , run_time=1.5 ) )
self.play(*UpperCAmelCase__ )
self.play(FadeOut(UpperCAmelCase__ ) )
_A : List[str] = MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase__ , run_time=3 ) )
self.play(
FadeOut(UpperCAmelCase__ , UpperCAmelCase__ , *UpperCAmelCase__ , *UpperCAmelCase__ ) , )
self.wait()
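# To render the scene locally (the file name below is illustrative):
#   manim -pql stage_checkpoint.py Stage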
| 417 |
"""Draw the Koch snowflake: repeatedly replace each segment of an initial
triangle with four shorter segments forming an outward 60-degree spike."""
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply ``iteration_step`` to the vector list ``steps`` times."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace every segment with four segments forming a 60-degree spike."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate ``vector`` counter-clockwise by ``angle_in_degrees``."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
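# Quick sanity check for rotate(): a 90-degree rotation maps the x unit vector
# onto the y unit vector (up to floating point error):
#   rotate(numpy.array([1, 0]), 90) -> approximately array([0., 1.])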
def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the snowflake with equal axis scaling."""
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 417 | 1 |
from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs
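# For example, create_inputs(["text", "audio"]) returns
# ["Text input", torch.ones(3000)].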
def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 25 |
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
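# For example: _run_operation({"a": 1}, getitem, "a") -> (1, None), while
# _run_operation({}, getitem, "missing") -> (None, KeyError('missing')).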
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(py) == set(my)
        assert len(py) == len(my)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
| 25 | 1
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """The graph is given as a dictionary of adjacency lists; the source
        vertex has to be defined upon initialization."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run breadth first search from the source vertex, recording parents."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the path from the source vertex to ``target_vertex`` in the
        form ``v1->v2->...->vn``; raise ValueError if no path was recorded."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
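# With the module-level ``graph`` above and source vertex "G" (see __main__ below):
#   g.shortest_path("D")   -> "G->C->A->B->D"
#   g.shortest_path("G")   -> "G"
#   g.shortest_path("Foo") -> raises ValueError (no path was recorded)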
if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
| 708 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta_prelayernorm''': [
'''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''RobertaPreLayerNormConfig''',
'''RobertaPreLayerNormOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
'''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaPreLayerNormForCausalLM''',
'''RobertaPreLayerNormForMaskedLM''',
'''RobertaPreLayerNormForMultipleChoice''',
'''RobertaPreLayerNormForQuestionAnswering''',
'''RobertaPreLayerNormForSequenceClassification''',
'''RobertaPreLayerNormForTokenClassification''',
'''RobertaPreLayerNormModel''',
'''RobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
'''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaPreLayerNormForCausalLM''',
'''TFRobertaPreLayerNormForMaskedLM''',
'''TFRobertaPreLayerNormForMultipleChoice''',
'''TFRobertaPreLayerNormForQuestionAnswering''',
'''TFRobertaPreLayerNormForSequenceClassification''',
'''TFRobertaPreLayerNormForTokenClassification''',
'''TFRobertaPreLayerNormMainLayer''',
'''TFRobertaPreLayerNormModel''',
'''TFRobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
'''FlaxRobertaPreLayerNormForCausalLM''',
'''FlaxRobertaPreLayerNormForMaskedLM''',
'''FlaxRobertaPreLayerNormForMultipleChoice''',
'''FlaxRobertaPreLayerNormForQuestionAnswering''',
'''FlaxRobertaPreLayerNormForSequenceClassification''',
'''FlaxRobertaPreLayerNormForTokenClassification''',
'''FlaxRobertaPreLayerNormModel''',
'''FlaxRobertaPreLayerNormPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
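# With the lazy module installed in sys.modules, an import such as
#   from transformers.models.roberta_prelayernorm import RobertaPreLayerNormModel
# only triggers the heavy torch-side import on first attribute access.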
| 501 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A:
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=30 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=0.0_2 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=2 , ) -> List[str]:
"""simple docstring"""
_UpperCamelCase :Dict = parent
_UpperCamelCase :Any = batch_size
_UpperCamelCase :str = image_size
_UpperCamelCase :int = patch_size
_UpperCamelCase :int = num_channels
_UpperCamelCase :int = is_training
_UpperCamelCase :List[str] = use_labels
_UpperCamelCase :int = hidden_size
_UpperCamelCase :Tuple = num_hidden_layers
_UpperCamelCase :Optional[int] = num_attention_heads
_UpperCamelCase :Optional[int] = intermediate_size
_UpperCamelCase :Tuple = hidden_act
_UpperCamelCase :int = hidden_dropout_prob
_UpperCamelCase :int = attention_probs_dropout_prob
_UpperCamelCase :Any = type_sequence_label_size
_UpperCamelCase :List[str] = initializer_range
_UpperCamelCase :Optional[Any] = scope
_UpperCamelCase :Union[str, Any] = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase :Union[str, Any] = (image_size // patch_size) ** 2
_UpperCamelCase :List[str] = num_patches + 1
def _UpperCamelCase( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase :List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase :str = None
if self.use_labels:
_UpperCamelCase :Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase :List[Any] = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase( self ) -> List[str]:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase :Any = ViTModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
_UpperCamelCase :int = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
_UpperCamelCase :Union[str, Any] = ViTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
_UpperCamelCase :Optional[int] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCamelCase :Any = 1
_UpperCamelCase :List[Any] = ViTForMaskedImageModeling(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
_UpperCamelCase :Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase :str = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
_UpperCamelCase :str = self.type_sequence_label_size
_UpperCamelCase :str = ViTForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
_UpperCamelCase :Optional[int] = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCamelCase :str = 1
_UpperCamelCase :List[str] = ViTForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
_UpperCamelCase :List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase :str = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _UpperCamelCase( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase :Any = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) :Dict = config_and_inputs
_UpperCamelCase :Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
A = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
A = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
A = True
A = False
A = False
A = False
def _UpperCamelCase( self ) -> str:
"""simple docstring"""
_UpperCamelCase :int = ViTModelTester(self )
_UpperCamelCase :Tuple = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def _UpperCamelCase( self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def _UpperCamelCase( self ) -> Any:
"""simple docstring"""
pass
def _UpperCamelCase( self ) -> int:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase :Optional[int] = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCamelCase :Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) )
def _UpperCamelCase( self ) -> Any:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase :Dict = model_class(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase :Optional[int] = [*signature.parameters.keys()]
_UpperCamelCase :Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase( self ) -> List[str]:
"""simple docstring"""
_UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase( self ) -> str:
"""simple docstring"""
_UpperCamelCase :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase( self ) -> str:
"""simple docstring"""
_UpperCamelCase :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def _UpperCamelCase( self ) -> Optional[int]:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase :List[Any] = ViTModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def A_ ( ) -> Optional[int]:
_UpperCamelCase :int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class A( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCamelCase( self ) -> str:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def _UpperCamelCase( self ) -> Any:
"""simple docstring"""
_UpperCamelCase :List[str] = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :Optional[Any] = self.default_image_processor
_UpperCamelCase :str = prepare_img()
_UpperCamelCase :List[Any] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
_UpperCamelCase :str = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
_UpperCamelCase :Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :Dict = torch.tensor([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
@slow
def _UpperCamelCase( self ) -> str:
"""simple docstring"""
_UpperCamelCase :Tuple = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :str = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' , size=4_80 )
_UpperCamelCase :List[str] = prepare_img()
_UpperCamelCase :List[Any] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' )
_UpperCamelCase :List[Any] = inputs.pixel_values.to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
_UpperCamelCase :Tuple = model(SCREAMING_SNAKE_CASE__ , interpolate_pos_encoding=SCREAMING_SNAKE_CASE__ )
# verify the logits
_UpperCamelCase :Optional[Any] = torch.Size((1, 36_01, 3_84) )
self.assertEqual(outputs.last_hidden_state.shape , SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :List[Any] = torch.tensor(
[[4.2_3_4_0, 4.3_9_0_6, -6.6_6_9_2], [4.5_4_6_3, 1.8_9_2_8, -6.7_2_5_7], [4.4_4_2_9, 0.8_4_9_6, -5.8_5_8_5]] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _UpperCamelCase( self ) -> Dict:
"""simple docstring"""
_UpperCamelCase :List[Any] = ViTModel.from_pretrained('''facebook/dino-vits8''' , torch_dtype=torch.floataa , device_map='''auto''' )
_UpperCamelCase :Tuple = self.default_image_processor
_UpperCamelCase :Union[str, Any] = prepare_img()
_UpperCamelCase :Tuple = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' )
_UpperCamelCase :List[Any] = inputs.pixel_values.to(SCREAMING_SNAKE_CASE__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_UpperCamelCase :Optional[Any] = model(SCREAMING_SNAKE_CASE__ )
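# The @slow integration tests above are gated behind transformers' RUN_SLOW flag,
# e.g. (the path assumes the usual transformers repo layout):
#   RUN_SLOW=1 pytest tests/models/vit/test_modeling_vit.py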
| 355 |
"""simple docstring"""
import os
import sys
UpperCamelCase__ :Union[str, Any] = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
UpperCamelCase__ :List[Any] = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def A_ ( *snake_case__ , **snake_case__ ) -> int:
return AutoConfig.from_pretrained(*snake_case__ , **snake_case__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def A_ ( *snake_case__ , **snake_case__ ) -> int:
return AutoTokenizer.from_pretrained(*snake_case__ , **snake_case__ )
@add_start_docstrings(AutoModel.__doc__ )
def A_ ( *snake_case__ , **snake_case__ ) -> Dict:
return AutoModel.from_pretrained(*snake_case__ , **snake_case__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def A_ ( *snake_case__ , **snake_case__ ) -> Dict:
return AutoModelForCausalLM.from_pretrained(*snake_case__ , **snake_case__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def A_ ( *snake_case__ , **snake_case__ ) -> Dict:
return AutoModelForMaskedLM.from_pretrained(*snake_case__ , **snake_case__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def A_ ( *snake_case__ , **snake_case__ ) -> Dict:
return AutoModelForSequenceClassification.from_pretrained(*snake_case__ , **snake_case__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def A_ ( *snake_case__ , **snake_case__ ) -> Union[str, Any]:
return AutoModelForQuestionAnswering.from_pretrained(*snake_case__ , **snake_case__ )
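# Typical torch.hub usage, assuming this file is the repo-level hubconf.py:
#   tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")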
| 355 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    CLIPConfig,
    CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure the image processor can also be loaded from a feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 708 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 | 0 |
import argparse
import os

import torch

from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    # codebook weights live under the `image_codebook.` prefix in the HF model
    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
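# For instance, a key such as "text_encoder.module.encoder.layer.0.attention.self.query.weight"
# comes out as "flava.text_model.encoder.layer.0.attention.self.query.weight" after the renames above.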
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """Copy/paste/tweak the original FLAVA weights into the transformers design."""
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 364 |
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
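# For example (the tracing table is printed as a side effect):
#   infix_2_postfix("a+b*c") -> "abc*+"
#   infix_2_prefix("a+b*c")  -> "+a*bc"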
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 364 | 1 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4

from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
    EntryNotFoundError,
    RepositoryNotFoundError,
    RevisionNotFoundError,
    is_jinja_available,
)
from packaging import version
from requests import HTTPError

from .. import __version__
from .constants import (
    DEPRECATED_REVISION_ARGS,
    DIFFUSERS_CACHE,
    HUGGINGFACE_CO_RESOLVE_ENDPOINT,
    SAFETENSORS_WEIGHTS_NAME,
    WEIGHTS_NAME,
)
from .import_utils import (
    ENV_VARS_TRUE_VALUES,
    _flax_version,
    _jax_version,
    _onnxruntime_version,
    _torch_version,
    is_flax_available,
    is_onnx_available,
    is_torch_available,
)
from .logging import get_logger


logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"


def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Formats a user-agent string with basic info about the running environment."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua


def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"


def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)


def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extracts the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None


# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")


def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).


cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )


def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
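# e.g. _add_variant("diffusion_pytorch_model.bin", "fp16")
#      -> "diffusion_pytorch_model.fp16.bin"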
def SCREAMING_SNAKE_CASE_ ( __A : int , *,
__A : str , __A : Union[str, Any] , __A : str , __A : List[Any] , __A : List[str] , __A : Dict , __A : Optional[int] , __A : Any , __A : Tuple , __A : Any , __A : Tuple=None , ) -> Optional[Any]:
"""simple docstring"""
a_ : Optional[Any] = str(__A )
if os.path.isfile(__A ):
return pretrained_model_name_or_path
elif os.path.isdir(__A ):
if os.path.isfile(os.path.join(__A , __A ) ):
# Load from a PyTorch checkpoint
a_ : Any = os.path.join(__A , __A )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__A , __A , __A ) ):
a_ : Any = os.path.join(__A , __A , __A )
return model_file
else:
raise EnvironmentError(
F"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__A ).base_version ) >= version.parse('0.20.0' )
):
try:
a_ : List[Any] = hf_hub_download(
__A , filename=_add_variant(__A , __A ) , cache_dir=__A , force_download=__A , proxies=__A , resume_download=__A , local_files_only=__A , use_auth_token=__A , user_agent=__A , subfolder=__A , revision=revision or commit_hash , )
warnings.warn(
F"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.""" , __A , )
return model_file
except: # noqa: E722
warnings.warn(
F"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__A , __A )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(__A , __A )}\' so that the correct variant file can be added.""" , __A , )
try:
# 2. Load model file as usual
a_ : List[Any] = hf_hub_download(
__A , filename=__A , cache_dir=__A , force_download=__A , proxies=__A , resume_download=__A , local_files_only=__A , use_auth_token=__A , user_agent=__A , subfolder=__A , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
F"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'this model name. Check the model page at '
F"""\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
F"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
F"""We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it"""
F""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
F""" directory containing a file named {weights_name} or"""
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
F"""Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from """
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
F"""Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory """
F"""containing a file named {weights_name}""" )
| 720 |
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt ``text`` into cipher values plus the matching random key."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert ``encrypt`` using the stored key."""
        plain = []
        for i in range(len(cipher)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt('Hello')
    print(c, k)
    print(Onepad().decrypt(c, k))
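# Worked round trip: encrypt computes c = (i + k) * k = i*k + k**2 for each
# character code i, so decrypt recovers i as (c - k**2) / k. For example, with
# i = ord('H') = 72 and k = 10: c = (72 + 10) * 10 = 820, and
# (820 - 100) / 10 = 72 -> 'H'.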
| 443 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # note: the misspelled kwarg is popped for backward compatibility
        self.use_memory_efficient_attention = kwargs.pop(
            'use_memorry_efficient_attention', use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `name` and `factor`, '
                f'got {self.rope_scaling}')
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 206 |
def greatest_common_divisor(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Iterative Euclidean algorithm."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main() -> None:
    try:
        nums = input('Enter two integers separated by comma (,): ').split(',')
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f'greatest_common_divisor({num_1}, {num_2}) = '
            f'{greatest_common_divisor(num_1, num_2)}'
        )
        print(f'By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}')
    except (IndexError, UnboundLocalError, ValueError):
        print('Wrong input')


if __name__ == "__main__":
    main()
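# Sanity checks -- each recursive call peels off one Euclid step,
# gcd(a, b) == gcd(b % a, a):
#   greatest_common_divisor(121, 11) == 11
#   gcd_by_iterative(24, 36) == 12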
| 206 | 1 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}

# fmt: off
# NOTE: each tensor below was originally stored under a key derived from the
# model id -- "/" and "-" replaced by "_", e.g. "google/ddpm-cifar10-32" ->
# "google_ddpm_cifar10_32" (see the lookup at the bottom of this script). The
# concrete keys did not survive extraction, so hypothetical placeholders are
# used here.
results["model_key_00"] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
results["model_key_01"] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
results["model_key_02"] = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
results["model_key_03"] = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
results["model_key_04"] = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
results["model_key_05"] = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
results["model_key_06"] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
results["model_key_07"] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
results["model_key_08"] = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
results["model_key_09"] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
results["model_key_10"] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
results["model_key_11"] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
results["model_key_12"] = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
results["model_key_13"] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
results["model_key_14"] = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter='diffusers')
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1]

        print(F'''Started running {mod.modelId}!!!''')

        if mod.modelId.startswith('CompVis'):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder='unet')
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1e-3
        )
        print(F'''{mod.modelId} has passed successfully!!!''')
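# Key derivation used in the lookup above, step by step:
#   mod.modelId = "CompVis/ldm-celebahq-256"
#   "_".join(mod.modelId.split("/"))            # -> "CompVis_ldm-celebahq-256"
#   "_".join("CompVis_ldm-celebahq-256".split("-"))  # -> "CompVis_ldm_celebahq_256"
# i.e. both "/" and "-" are replaced by "_", which is the form the `results`
# keys must take.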
| 708 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = 'bert'
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # The student-side key names below follow DistilBERT's parameter naming,
    # as used in the Transformers distillation example scripts.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f'distilbert.embeddings.{w}.weight'] = state_dict[f'{prefix}.embeddings.{w}.weight']
    for w in ["weight", "bias"]:
        compressed_sd[f'distilbert.embeddings.LayerNorm.{w}'] = state_dict[f'{prefix}.embeddings.LayerNorm.{w}']

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}'] = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'
            ]
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}'] = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'
            ]
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}'] = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'
            ]
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}'] = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'
            ]
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}'] = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'
            ]
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}'] = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'
            ]
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}'] = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'
            ]
            compressed_sd[f'distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}'] = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'
            ]
        std_idx += 1

    compressed_sd['vocab_projector.weight'] = state_dict['cls.predictions.decoder.weight']
    compressed_sd['vocab_projector.bias'] = state_dict['cls.predictions.bias']
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f'vocab_transform.{w}'] = state_dict[f'cls.predictions.transform.dense.{w}']
            compressed_sd[f'vocab_layer_norm.{w}'] = state_dict[f'cls.predictions.transform.LayerNorm.{w}']

    print(F'''N layers selected for distillation: {std_idx}''')
    print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
    print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
    torch.save(compressed_sd, args.dump_checkpoint)
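# The teacher-to-student layer map above copies BERT layers [0, 2, 4, 7, 9, 11]
# into student layers [0..5], so `std_idx` ends at 6 and the checkpoint has a
# 6-layer DistilBERT-shaped state dict. A hedged loading sketch (assumes the
# DistilBERT key names used above):
#
#   from transformers import DistilBertConfig, DistilBertForMaskedLM
#   student = DistilBertForMaskedLM(DistilBertConfig(n_layers=6))
#   student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)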
| 40 | 0 |
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'

_KWARGS_DESCRIPTION = (
    '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. '
    'Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
)

_CITATION = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 55 |
import requests
giphy_api_key = 'YOUR API KEY'


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Return a list of GIF URLs for the given search query."""
    formatted_query = "+".join(query.split())
    url = F'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print('\n'.join(get_gifs('space ship')))
| 55 | 1 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 520 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
], )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 1_8536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
A : Any = {"""input_ids""": [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A,  # `A` is the expected-encoding dict defined above
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
| 520 | 1 |
def heaps(arr: list) -> list:
    """Return all permutations of ``arr`` using Heap's iterative algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    print(heaps(arr))
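# Heap's algorithm yields all n! permutations, each obtained from the previous
# one by a single swap, e.g.:
#   heaps([1, 2, 3])
#   # -> [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]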
| 105 |
"""simple docstring"""
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y = gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Return x with x = r1 (mod n1) and x = r2 (mod n2), for coprime n1, n2."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return b such that a*b = 1 (mod n)."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as ``chinese_remainder_theorem``, computed via modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
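# Worked example: find x with x = 1 (mod 5) and x = 3 (mod 7).
# extended_euclid(5, 7) returns (3, -2) since 5*3 + 7*(-2) = 1, so
# x = 3*3*5 + 1*(-2)*7 = 31, and indeed
# chinese_remainder_theorem(5, 1, 7, 3) == chinese_remainder_theorem2(5, 1, 7, 3) == 31.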
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True) | 642 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'document-question-answering': LayoutLMv3ForQuestionAnswering, 'feature-extraction': LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained('microsoft/layoutlmv3-base').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='pt').pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device), )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 716 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
    import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name='size', default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')

        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')

        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')

        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors) | 208 | 0 |
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(input().strip())))
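# Worked example (Project Euler 16 uses power=1000): 2**15 == 32768 and
# 3 + 2 + 7 + 6 + 8 == 26, so solution(15) == 26.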
| 58 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn" | 327 | 0 |
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}''')
    print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}''')
    print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}''')
    print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}''')
    print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}''')
    print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}''')
    print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}''')
    print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}''')
    print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}''')
    print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}''')


if __name__ == "__main__":
    main()
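# Trace of the iterative version for euclidean_gcd(252, 105):
#   (252, 105) -> (105, 42) -> (42, 21) -> (21, 0)  => gcd is 21
# The recursive version performs the same steps via
# euclidean_gcd_recursive(a, b) == euclidean_gcd_recursive(b, a % b).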
| 511 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 1_16, 1_11, 1_05, 1_17, 1_06, 1_07, 38, 2_32, 1_36, 1_78, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 1_07, 38, 2_01, 1_74, 38, 2_01, 1_75, 38, 2_01, 1_76, 38, 2_01, 1_77, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
def lowerCAmelCase__ ( self ):
a_ = self.perceiver_tokenizer
a_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
a_ = [4, 71, 38, 1_14, 1_17, 1_16, 1_09, 38, 1_18, 1_03, 1_20, 1_03, 1_09, 1_20, 1_03, 1_18, 1_10, 38, 1_08, 1_17, 1_20, 38, 1_21, 1_23, 1_15, 1_15, 1_03, 1_20, 1_11, 1_28, 1_03, 1_22, 1_11, 1_17, 1_16, 52, 5, 0]
# fmt: on
a_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
if FRAMEWORK != "jax":
a_ = list(batch.input_ids.numpy()[0] )
else:
a_ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowerCAmelCase__ ( self ):
a_ = self.perceiver_tokenizer
a_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
a_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , UpperCAmelCase )
self.assertIn("""attention_mask""" , UpperCAmelCase )
self.assertNotIn("""decoder_input_ids""" , UpperCAmelCase )
self.assertNotIn("""decoder_attention_mask""" , UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = self.perceiver_tokenizer
a_ = [
"""Summary of the text.""",
"""Another summary.""",
]
a_ = tokenizer(
text_target=UpperCAmelCase , max_length=32 , padding="""max_length""" , truncation=UpperCAmelCase , return_tensors=UpperCAmelCase )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowerCAmelCase__ ( self ):
# safety check on max_len default value so we are sure the test works
a_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
a_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
a_ = tempfile.mkdtemp()
a_ = """ He is very happy, UNwant\u00E9d,running"""
a_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
a_ = tokenizer.__class__.from_pretrained(UpperCAmelCase )
a_ = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
shutil.rmtree(UpperCAmelCase )
a_ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
a_ = tempfile.mkdtemp()
a_ = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
a_ = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
a_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
a_ = tokenizer.__class__.from_pretrained(UpperCAmelCase )
a_ = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
a_ = tokenizer.__class__.from_pretrained(UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
a_ = json.load(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
a_ = json.load(UpperCAmelCase )
a_ = [f'''<extra_id_{i}>''' for i in range(1_25 )]
a_ = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
a_ = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(UpperCAmelCase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
a_ = tokenizer_class.from_pretrained(
UpperCAmelCase , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
a_ = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=UpperCAmelCase )]
a_ = tokenizer_class.from_pretrained(
UpperCAmelCase , additional_special_tokens=UpperCAmelCase , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def lowerCAmelCase__ ( self ):
a_ = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_78] ) , """�""" )
def lowerCAmelCase__ ( self ):
pass
def lowerCAmelCase__ ( self ):
pass
def lowerCAmelCase__ ( self ):
pass
def lowerCAmelCase__ ( self ):
pass
def lowerCAmelCase__ ( self ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
a_ = self.get_tokenizers(fast=UpperCAmelCase , do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
a_ = ["""[CLS]""", """t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """s""", """t""", """[SEP]"""]
a_ = tokenizer.convert_tokens_to_string(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
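# Perceiver tokenizes raw UTF-8 bytes plus six special tokens, which is why the
# tests above avoid decoding arbitrary single ids (a lone byte may not be valid
# UTF-8). A plain-Python sketch of the byte-level round trip; the offset of 6
# matches the expected ids in the tests above ('U' == 85 -> 91):
def _byte_encode_sketch(text, offset=6):
    return [b + offset for b in text.encode("utf-8")]

def _byte_decode_sketch(ids, offset=6):
    return bytes(i - offset for i in ids).decode("utf-8", errors="replace")

assert _byte_encode_sketch("U") == [91]
assert _byte_decode_sketch(_byte_encode_sketch("Unicode €.")) == "Unicode €."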
| 511 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : List[Any] = logging.get_logger(__name__)
a_ : int = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class snake_case ( PretrainedConfig ):
    """simple docstring"""
    model_type = "lxmert"
    attribute_map = {}
def __init__( self , UpperCamelCase=3_0522 , UpperCamelCase=768 , UpperCamelCase=12 , UpperCamelCase=9500 , UpperCamelCase=1600 , UpperCamelCase=400 , UpperCamelCase=3072 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=512 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=1e-12 , UpperCamelCase=9 , UpperCamelCase=5 , UpperCamelCase=5 , UpperCamelCase=2048 , UpperCamelCase=4 , UpperCamelCase=6.67 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , **UpperCamelCase , ):
"""simple docstring"""
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = hidden_act
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = num_qa_labels
lowerCamelCase_ = num_object_labels
lowerCamelCase_ = num_attr_labels
lowerCamelCase_ = l_layers
lowerCamelCase_ = x_layers
lowerCamelCase_ = r_layers
lowerCamelCase_ = visual_feat_dim
lowerCamelCase_ = visual_pos_dim
lowerCamelCase_ = visual_loss_normalizer
lowerCamelCase_ = task_matched
lowerCamelCase_ = task_mask_lm
lowerCamelCase_ = task_obj_predict
lowerCamelCase_ = task_qa
lowerCamelCase_ = visual_obj_loss
lowerCamelCase_ = visual_attr_loss
lowerCamelCase_ = visual_feat_loss
lowerCamelCase_ = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
super().__init__(**UpperCamelCase )
| 675 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class snake_case ( ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , UpperCamelCase=None , UpperCamelCase=None , **UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCamelCase , )
lowerCamelCase_ = kwargs.pop("feature_extractor" )
lowerCamelCase_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase="max_length" , UpperCamelCase="np" , **UpperCamelCase ):
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(UpperCamelCase , UpperCamelCase ) or (isinstance(UpperCamelCase , UpperCamelCase ) and not isinstance(text[0] , UpperCamelCase )):
lowerCamelCase_ = [self.tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )]
elif isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(text[0] , UpperCamelCase ):
lowerCamelCase_ = []
# Maximum number of queries across batch
lowerCamelCase_ = max([len(UpperCamelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(UpperCamelCase ) != max_num_queries:
lowerCamelCase_ = t + [" "] * (max_num_queries - len(UpperCamelCase ))
lowerCamelCase_ = self.tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
encodings.append(UpperCamelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
lowerCamelCase_ = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
lowerCamelCase_ = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
lowerCamelCase_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
lowerCamelCase_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
lowerCamelCase_ = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
lowerCamelCase_ = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
lowerCamelCase_ = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
lowerCamelCase_ = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
lowerCamelCase_ = BatchEncoding()
lowerCamelCase_ = input_ids
lowerCamelCase_ = attention_mask
if query_images is not None:
lowerCamelCase_ = BatchEncoding()
lowerCamelCase_ = self.image_processor(
UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ).pixel_values
lowerCamelCase_ = query_pixel_values
if images is not None:
lowerCamelCase_ = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None and images is not None:
lowerCamelCase_ = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
lowerCamelCase_ = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase )
def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
return self.image_processor.post_process(*UpperCamelCase , **UpperCamelCase )
def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
return self.image_processor.post_process_object_detection(*UpperCamelCase , **UpperCamelCase )
def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*UpperCamelCase , **UpperCamelCase )
def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def snake_case ( self , *UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def snake_case ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCamelCase , )
return self.image_processor_class
@property
def snake_case ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCamelCase , )
return self.image_processor
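# The core batching trick in __call__ above: every sample's query list is padded
# to the batch-wide maximum before tokenizing, so per-image query tensors stack
# cleanly. A plain-Python sketch of that padding step (helper name is illustrative):
def _pad_queries_sketch(text):
    max_num_queries = max(len(t) for t in text)
    return [t + [" "] * (max_num_queries - len(t)) for t in text]

assert _pad_queries_sketch([["cat"], ["dog", "bird"]]) == [["cat", " "], ["dog", "bird"]]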
| 675 | 1 |
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: only returned when `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
    def _compute(self, predictions, references, return_pvalue=False):
        '''simple docstring'''
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
return {"spearmanr": results[0]} | 721 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
LANGUAGES = {"mustc": MUSTC_LANGS}
class __UpperCamelCase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple="<s>" , lowerCAmelCase : Optional[int]="</s>" , lowerCAmelCase : Any="<pad>" , lowerCAmelCase : Union[str, Any]="<unk>" , lowerCAmelCase : Tuple=False , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : Any , ):
'''simple docstring'''
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , do_upper_case=lowerCAmelCase , do_lower_case=lowerCAmelCase , tgt_lang=lowerCAmelCase , lang_codes=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
UpperCAmelCase_ = do_upper_case
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = load_json(lowerCAmelCase )
UpperCAmelCase_ = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ = spm_file
UpperCAmelCase_ = load_spm(lowerCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
UpperCAmelCase_ = lang_codes
UpperCAmelCase_ = LANGUAGES[lang_codes]
UpperCAmelCase_ = [F"<lang:{lang}>" for lang in self.langs]
UpperCAmelCase_ = {lang: self.sp_model.PieceToId(F"<lang:{lang}>" ) for lang in self.langs}
UpperCAmelCase_ = self.lang_tokens
UpperCAmelCase_ = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
UpperCAmelCase_ = {}
@property
def __A ( self : List[Any] ):
'''simple docstring'''
return len(self.encoder )
@property
def __A ( self : Any ):
'''simple docstring'''
return self._tgt_lang
@tgt_lang.setter
def __A ( self : int , lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = new_tgt_lang
self.set_tgt_lang_special_tokens(lowerCAmelCase )
def __A ( self : List[Any] , lowerCAmelCase : str ):
'''simple docstring'''
UpperCAmelCase_ = self.lang_code_to_id[tgt_lang]
UpperCAmelCase_ = [lang_code_id]
def __A ( self : List[str] , lowerCAmelCase : str ):
'''simple docstring'''
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def __A ( self : str , lowerCAmelCase : int ):
'''simple docstring'''
return self.encoder.get(lowerCAmelCase , self.encoder[self.unk_token] )
def __A ( self : Optional[Any] , lowerCAmelCase : int ):
'''simple docstring'''
return self.decoder.get(lowerCAmelCase , self.unk_token )
def __A ( self : Dict , lowerCAmelCase : List[str] ):
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
UpperCAmelCase_ = self.sp_model.decode(lowerCAmelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
UpperCAmelCase_ = []
else:
current_sub_tokens.append(lowerCAmelCase )
UpperCAmelCase_ = self.sp_model.decode(lowerCAmelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def __A ( self : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int]=None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def __A ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
UpperCAmelCase_ = [1] * len(self.prefix_tokens )
UpperCAmelCase_ = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase )) + ([0] * len(lowerCAmelCase )) + suffix_ones
def __A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self : Tuple , lowerCAmelCase : Dict ):
'''simple docstring'''
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = load_spm(self.spm_file , self.sp_model_kwargs )
def __A ( self : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None ):
'''simple docstring'''
UpperCAmelCase_ = Path(lowerCAmelCase )
assert save_dir.is_dir(), F"{save_directory} should be a directory"
UpperCAmelCase_ = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
UpperCAmelCase_ = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , lowerCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , lowerCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(lowerCAmelCase , "wb" ) as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (str(lowerCAmelCase ), str(lowerCAmelCase ))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __A ( TestCasePlus ):
'''simple docstring'''
@require_torch
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_a = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(A )
BertModel.from_pretrained(A )
BertTokenizer.from_pretrained(A )
pipeline(task='''fill-mask''' , model=A )
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_a = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(A , env=A , check=A , capture_output=A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def a__ (self ) -> Dict:
"""simple docstring"""
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_a = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(A )
BertModel.from_pretrained(A )
BertTokenizer.from_pretrained(A )
pipeline(task='''fill-mask''' , model=A )
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(A , env=A , check=A , capture_output=A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(A , env=A , check=A , capture_output=A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
_a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(A , env=A , check=A , capture_output=A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = '''
from transformers import pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
_a = self.get_env()
_a = '''1'''
_a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
_a = subprocess.run(A , env=A , check=A , capture_output=A )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = '''
from transformers import AutoModel
'''
_a = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(A , env=A , check=A , capture_output=A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(A , env=A , check=A , capture_output=A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
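# The tests above fake "no network" by monkey-patching socket.socket in a child
# process before transformers is imported. The same trick in isolation, with the
# real socket restored afterwards:
import socket

def _offline_socket(*args, **kwargs):
    raise RuntimeError("network disabled for this sketch")

_real_socket = socket.socket
socket.socket = _offline_socket
try:
    try:
        socket.socket()
    except RuntimeError as err:
        print(f"blocked as expected: {err}")
finally:
    socket.socket = _real_socket  # always restore the real implementation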
| 11 | '''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 390 | 0 |
def is_isogram(string: str) -> bool:
    """Return True if no letter occurs more than once in the string (case-insensitive)."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 214 |
def check_bouncy(n: int) -> bool:
    """A number is bouncy if its digits are neither entirely non-decreasing nor non-increasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
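# Sanity checks against the Project Euler 112 statement this file implements:
# monotone digit strings are never bouncy, and the 50% proportion is first
# reached (per the problem statement) at n == 538.
assert check_bouncy(538)
assert not check_bouncy(1234) and not check_bouncy(4321)
assert solution(50) == 538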
| 214 | 1 |
from ...processing_utils import ProcessorMixin
class A__ ( ProcessorMixin ):
    '''simple docstring'''
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"
def __init__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any]=None , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : List[str]=True ):
"""simple docstring"""
return self.tokenizer.get_decoder_prompt_ids(task=_SCREAMING_SNAKE_CASE , language=_SCREAMING_SNAKE_CASE , no_timestamps=_SCREAMING_SNAKE_CASE )
def __call__( self : List[Any] , *_SCREAMING_SNAKE_CASE : List[Any] , **_SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCamelCase = kwargs.pop('audio' , _SCREAMING_SNAKE_CASE )
UpperCamelCase = kwargs.pop('sampling_rate' , _SCREAMING_SNAKE_CASE )
UpperCamelCase = kwargs.pop('text' , _SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
UpperCamelCase = args[0]
UpperCamelCase = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
UpperCamelCase = self.feature_extractor(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , sampling_rate=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is not None:
UpperCamelCase = self.tokenizer(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif audio is None:
return encodings
else:
UpperCamelCase = encodings['input_ids']
return inputs
def _SCREAMING_SNAKE_CASE ( self : Dict , *_SCREAMING_SNAKE_CASE : Tuple , **_SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Dict , *_SCREAMING_SNAKE_CASE : Dict , **_SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[Any]="np" ):
"""simple docstring"""
return self.tokenizer.get_prompt_ids(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
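# The __call__ above is a routing pattern: audio goes to the feature extractor,
# text to the tokenizer, and when both are given the text ids are attached to
# the audio features as "labels". A standalone sketch of that routing (the two
# inner "processors" below are stand-ins, not the real Whisper components):
def _route_sketch(audio=None, text=None):
    if audio is None and text is None:
        raise ValueError("You need to specify either an `audio` or `text` input to process.")
    inputs = {"input_features": list(audio)} if audio is not None else None
    encodings = {"input_ids": [ord(c) for c in text]} if text is not None else None
    if text is None:
        return inputs
    if audio is None:
        return encodings
    inputs["labels"] = encodings["input_ids"]
    return inputs

assert set(_route_sketch(audio=[0.1, 0.2], text="hi")) == {"input_features", "labels"}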
| 280 |
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial given as (c0, c1, ...) at x by direct summation."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's rule."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
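# Horner's rule rewrites c0 + c1*x + c2*x**2 + ... as (...(cn*x + c(n-1))*x + ...) + c0,
# so evaluation needs one multiply-add per coefficient instead of recomputing
# x**i for every term. A quick equivalence check on random coefficients:
import random

_coeffs = [random.uniform(-5, 5) for _ in range(8)]
assert abs(evaluate_poly(_coeffs, 2.5) - horner(_coeffs, 2.5)) < 1e-9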
| 280 | 1 |
"""simple docstring"""
def UpperCamelCase ( _lowerCAmelCase : int , _lowerCAmelCase : int ):
return int((input_a, input_a).count(0 ) != 0 )
def UpperCamelCase ( ):
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 705 | """simple docstring"""
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hidden_cursor():
    # Restore the cursor even if the body raises.
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
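# Usage sketch for the context manager above: the cursor stays hidden for the
# duration of the block and is restored even if the body raises (time.sleep
# stands in for real work).
if __name__ == "__main__":
    import time

    with hidden_cursor():
        for _ in range(3):
            time.sleep(0.01)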
| 173 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""],
"""feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""],
"""processing_wav2vec2""": ["""Wav2Vec2Processor"""],
"""tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavaveca"] = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wavaveca"] = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wavaveca"] = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
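# The _LazyModule swap above defers the heavy torch/tf/flax imports until an
# attribute is first accessed. A minimal standalone sketch of the same idea via
# PEP 562 module-level __getattr__ (independent of the transformers
# implementation; meant as its own tiny module, not part of the file above):
import importlib

_LAZY = {"sqrt": "math", "dumps": "json"}  # attribute name -> providing module

def __getattr__(name):
    if name in _LAZY:
        return getattr(importlib.import_module(_LAZY[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")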
| 629 |
g = 9.80665  # standard gravity in m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Buoyant force on a submerged object: F = fluid_density * gravity * volume."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
| 629 | 1 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
_UpperCamelCase = logging.get_logger(__name__)
class lowerCamelCase__ ( Trainer ):
'''simple docstring'''
def __init__( self : Dict , __A : Optional[int]=None , **__A : Optional[int] ) -> Optional[int]:
'''simple docstring'''
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" , __A , )
super().__init__(args=__A , **__A )
| 211 |
'''simple docstring'''
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the day of the week for a Gregorian date given as mm-dd-yyyy or mm/dd/yyyy."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")
    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
    args = parser.parse_args()
zeller(args.date_input)
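# For reference, the textbook Gregorian form of Zeller's congruence, which the
# arithmetic above implements as a variant (int(2.6 * m - 5.39), with a
# different day-zero convention: 0 = Sunday above, 0 = Saturday below):
#   h = (q + floor(13 * (m + 1) / 5) + K + floor(K / 4) + floor(J / 4) - 2 * J) mod 7
# where q is the day, m the month (March=3 ... February=14), K = year mod 100,
# and J = year // 100. A direct sketch:
def _zeller_sketch(q: int, m: int, year: int) -> int:
    if m <= 2:  # January/February count as months 13/14 of the previous year
        m += 12
        year -= 1
    k, j = year % 100, year // 100
    return (q + (13 * (m + 1)) // 5 + k + k // 4 + j // 4 - 2 * j) % 7

assert _zeller_sketch(1, 1, 2000) == 0  # 2000-01-01 was a Saturday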
| 211 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyImgaImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def _lowerCamelCase ( self ):
return 32
@property
def _lowerCamelCase ( self ):
return 32
@property
def _lowerCamelCase ( self ):
return self.time_input_dim
@property
def _lowerCamelCase ( self ):
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self ):
return 100
@property
def _lowerCamelCase ( self ):
A_ : Any = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def _lowerCamelCase ( self ):
torch.manual_seed(0 )
A_ : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
A_ : Any = MultilingualCLIP(a__ )
A_ : Union[str, Any] = text_encoder.eval()
return text_encoder
@property
def _lowerCamelCase ( self ):
torch.manual_seed(0 )
A_ : Union[str, Any] = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ : Tuple = UNetaDConditionModel(**a__ )
return model
@property
def _lowerCamelCase ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _lowerCamelCase ( self ):
torch.manual_seed(0 )
A_ : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def _lowerCamelCase ( self ):
A_ : Any = self.dummy_text_encoder
A_ : List[str] = self.dummy_tokenizer
A_ : Dict = self.dummy_unet
A_ : Union[str, Any] = self.dummy_movq
A_ : Tuple = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
A_ : Tuple = DDIMScheduler(**a__ )
A_ : Dict = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _lowerCamelCase ( self , a__ , a__=0 ):
A_ : Tuple = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a__ ) ).to(a__ )
A_ : str = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a__ )
# create init_image
A_ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(a__ ) ).to(a__ )
A_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A_ : str = Image.fromarray(np.uinta(a__ ) ).convert("""RGB""" ).resize((256, 256) )
if str(a__ ).startswith("""mps""" ):
A_ : int = torch.manual_seed(a__ )
else:
A_ : List[str] = torch.Generator(device=a__ ).manual_seed(a__ )
A_ : Optional[int] = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def _lowerCamelCase ( self ):
A_ : Dict = """cpu"""
A_ : List[Any] = self.get_dummy_components()
A_ : Any = self.pipeline_class(**a__ )
A_ : Union[str, Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
A_ : List[str] = pipe(**self.get_dummy_inputs(a__ ) )
A_ : Any = output.images
A_ : Any = pipe(
**self.get_dummy_inputs(a__ ) , return_dict=a__ , )[0]
A_ : str = image[0, -3:, -3:, -1]
A_ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : Optional[int] = np.array(
[0.6147_4943, 0.607_3539, 0.4330_8544, 0.592_8269, 0.4749_3595, 0.4675_5973, 0.461_3838, 0.4536_8797, 0.5011_9233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ):
A_ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
A_ : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
A_ : Any = """A red cartoon frog, 4k"""
A_ : List[Any] = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(a__ )
A_ : Optional[int] = KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
A_ : int = pipeline.to(a__ )
pipeline.set_progress_bar_config(disable=a__ )
A_ : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
A_ , A_ : str = pipe_prior(
a__ , generator=a__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
A_ : Optional[Any] = pipeline(
a__ , image=a__ , image_embeds=a__ , negative_image_embeds=a__ , generator=a__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
A_ : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
| 569 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = TypeVar("""DatasetType""", Dataset, IterableDataset)
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = "first_exhausted" ,):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("""Unable to interleave an empty list of datasets.""" )
for i, dataset in enumerate(_lowerCAmelCase ):
if not isinstance(_lowerCAmelCase ,(Dataset, IterableDataset) ):
if isinstance(_lowerCAmelCase ,(DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"""is an empty dataset dictionary.""" )
raise ValueError(
f"""Dataset at position {i} has at least one split: {list(_lowerCAmelCase )}\n"""
f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_lowerCAmelCase ) )}']""" )
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_lowerCAmelCase ).__name__}.""" )
if i == 0:
A_ , A_ : Optional[int] = (
(Dataset, IterableDataset) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
raise ValueError(
f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,info=_lowerCAmelCase ,split=_lowerCAmelCase ,stopping_strategy=_lowerCAmelCase )
else:
return _interleave_iterable_datasets(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,info=_lowerCAmelCase ,split=_lowerCAmelCase ,stopping_strategy=_lowerCAmelCase )
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = 0 ,):
'''simple docstring'''
if not dsets:
raise ValueError("""Unable to concatenate an empty list of datasets.""" )
for i, dataset in enumerate(_lowerCAmelCase ):
if not isinstance(_lowerCAmelCase ,(Dataset, IterableDataset) ):
if isinstance(_lowerCAmelCase ,(DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"""is an empty dataset dictionary.""" )
raise ValueError(
f"""Dataset at position {i} has at least one split: {list(_lowerCAmelCase )}\n"""
f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_lowerCAmelCase ) )}']""" )
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_lowerCAmelCase ).__name__}.""" )
if i == 0:
A_ , A_ : Dict = (
(Dataset, IterableDataset) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
raise ValueError(
f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_lowerCAmelCase ,info=_lowerCAmelCase ,split=_lowerCAmelCase ,axis=_lowerCAmelCase )
else:
return _concatenate_iterable_datasets(_lowerCAmelCase ,info=_lowerCAmelCase ,split=_lowerCAmelCase ,axis=_lowerCAmelCase )
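# A usage sketch for the map-style interleaving branch above, on two tiny
# in-memory datasets (assumes the `datasets` library is installed); with the
# default "first_exhausted" strategy, rows alternate until the smaller dataset
# runs out.
if __name__ == "__main__":
    from datasets import Dataset as _DS, interleave_datasets as _interleave

    _d1 = _DS.from_dict({"a": [0, 1, 2]})
    _d2 = _DS.from_dict({"a": [10, 11, 12, 13]})
    print(_interleave([_d1, _d2])["a"])  # expected: [0, 10, 1, 11, 2, 12]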
| 569 | 1 |
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    def __init__(self, img: np.ndarray, dst_width: int, dst_height: int) -> None:
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self) -> None:
        # Map every destination pixel back to its nearest source pixel.
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()
    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
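A standalone sanity check of the same mapping on a synthetic image, so no image file or OpenCV window is needed (shapes are illustrative): since ratio_x = src_w / dst_w, destination pixel (i, j) reads source pixel (int(ratio_y * i), int(ratio_x * j)).

tiny = np.arange(2 * 2 * 3, dtype=np.uint8).reshape(2, 2, 3)  # 2x2 RGB image
scaler = NearestNeighbour(tiny, 4, 4)  # upscale to 4x4
scaler.process()
# With a 0.5 ratio in each axis, every source pixel is repeated in a 2x2 block.
assert scaler.output.shape == (4, 4, 3)
assert (scaler.output[1, 1] == tiny[0, 0]).all()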
| 709 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowercase__ : Optional[Any] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def __lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Any=None , _UpperCamelCase : int=None , _UpperCamelCase : int=None , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Any=None , ):
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase_ = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
UpperCAmelCase_ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
UpperCAmelCase_ = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple=13 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Optional[int]=99 , UpperCAmelCase__ : Dict=16 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : List[Any]=32 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Dict=1 , UpperCAmelCase__ : Optional[int]=0 , UpperCAmelCase__ : Union[str, Any]=0.02 , ) ->Optional[int]:
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = eos_token_id
UpperCAmelCase_ = pad_token_id
UpperCAmelCase_ = bos_token_id
UpperCAmelCase_ = initializer_range
def lowerCAmelCase__ ( self : int ) ->Any:
UpperCAmelCase_ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        UpperCAmelCase_ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
UpperCAmelCase_ = shift_tokens_right(UpperCAmelCase__ , 1 , 2 )
UpperCAmelCase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCAmelCase__ , )
UpperCAmelCase_ = prepare_blenderbot_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return config, inputs_dict
def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[str]:
UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase__ ( self : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple ) ->Tuple:
UpperCAmelCase_ = 20
UpperCAmelCase_ = model_class_name(UpperCAmelCase__ )
UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
UpperCAmelCase_ = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
UpperCAmelCase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase_ = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
UpperCAmelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase_ = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase__ , )
UpperCAmelCase_ = model.decode(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def lowerCAmelCase__ ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) ->Union[str, Any]:
UpperCAmelCase_ = 20
UpperCAmelCase_ = model_class_name(UpperCAmelCase__ )
UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
UpperCAmelCase_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCAmelCase_ = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase_ = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
UpperCAmelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
UpperCAmelCase_ = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
UpperCAmelCase_ = model.decode(UpperCAmelCase__ , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ )
UpperCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = 99
def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]:
UpperCAmelCase_ = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
UpperCAmelCase_ = input_ids.shape[0]
UpperCAmelCase_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowerCAmelCase__ ( self : Any ) ->str:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._get_config_and_data()
UpperCAmelCase_ = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase__ )
UpperCAmelCase_ = lm_model(input_ids=UpperCAmelCase__ )
UpperCAmelCase_ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , UpperCAmelCase__ )
def lowerCAmelCase__ ( self : str ) ->int:
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[Any]:
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class lowerCamelCase ( lowerCamelCase , unittest.TestCase , lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = True
lowerCAmelCase__ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowerCAmelCase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]:
UpperCAmelCase_ = FlaxBlenderbotModelTester(self )
def lowerCAmelCase__ ( self : str ) ->Tuple:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Tuple ) ->str:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Tuple:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCAmelCase_ = model_class(UpperCAmelCase__ )
@jax.jit
def encode_jitted(UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any]=None , **UpperCAmelCase__ : Union[str, Any] ):
return model.encode(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase_ = encode_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase_ = encode_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase__ ( self : str ) ->str:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ = model_class(UpperCAmelCase__ )
UpperCAmelCase_ = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
UpperCAmelCase_ = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ):
return model.decode(
decoder_input_ids=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , encoder_outputs=UpperCAmelCase__ , )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase_ = decode_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase_ = decode_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCAmelCase__ ( self : int ) ->int:
for model_class_name in self.all_model_classes:
UpperCAmelCase_ = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
UpperCAmelCase_ = np.ones((1, 1) ) * model.config.eos_token_id
UpperCAmelCase_ = model(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]:
UpperCAmelCase_ = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
UpperCAmelCase_ = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
UpperCAmelCase_ = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=UpperCAmelCase__ )
UpperCAmelCase_ = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
UpperCAmelCase_ = ['''Sam''']
UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , return_tensors='''jax''' )
UpperCAmelCase_ = model.generate(**UpperCAmelCase__ , **UpperCAmelCase__ )
UpperCAmelCase_ = '''Sam is a great name. It means "sun" in Gaelic.'''
UpperCAmelCase_ = tokenizer.batch_decode(UpperCAmelCase__ , **UpperCAmelCase__ )
assert generated_txt[0].strip() == tgt_text
| 43 | 0 |
"""simple docstring"""
def __A ( a_ : int = 10 , a_ : str = 10_00 , a_ : Optional[int] = True )-> Optional[int]:
'''simple docstring'''
assert (
isinstance(a__ , a__ )
and isinstance(a__ , a__ )
and isinstance(a__ , a__ )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError('''Invalid value for min_val or max_val (min_value < max_value)''' )
return min_val if option else max_val
def __A ( a_ : Any , a_ : List[Any] )-> Union[str, Any]:
'''simple docstring'''
return int((number_a + number_a) / 2 )
def __A ( a_ : Optional[Any] , a_ : Optional[Any] , a_ : Optional[int] )-> Optional[Any]:
'''simple docstring'''
assert (
isinstance(a__ , a__ ) and isinstance(a__ , a__ ) and isinstance(a__ , a__ )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError('''argument value for lower and higher must be(lower > higher)''' )
if not lower < to_guess < higher:
raise ValueError(
'''guess value must be within the range of lower and higher value''' )
def answer(a_ : Tuple ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('''started...''' )
SCREAMING_SNAKE_CASE : str = lower
SCREAMING_SNAKE_CASE : List[Any] = higher
SCREAMING_SNAKE_CASE : List[Any] = []
while True:
SCREAMING_SNAKE_CASE : Tuple = get_avg(a__ , a__ )
last_numbers.append(a__ )
if answer(a__ ) == "low":
SCREAMING_SNAKE_CASE : str = number
elif answer(a__ ) == "high":
SCREAMING_SNAKE_CASE : Optional[int] = number
else:
break
print(F"guess the number : {last_numbers[-1]}" )
print(F"details : {last_numbers!s}" )
def __A ( )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = int(input('''Enter lower value : ''' ).strip() )
SCREAMING_SNAKE_CASE : Union[str, Any] = int(input('''Enter high value : ''' ).strip() )
SCREAMING_SNAKE_CASE : int = int(input('''Enter value to guess : ''' ).strip() )
guess_the_number(a__ , a__ , a__ )
if __name__ == "__main__":
main()
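A non-interactive trace of the same interval-halving idea, assuming the bounds and target below (numbers are illustrative):

low, high, target = 0, 1000, 355
guesses = []
while True:
    mid = int((low + high) / 2)  # same rule as get_avg above
    guesses.append(mid)
    if mid < target:
        low = mid
    elif mid > target:
        high = mid
    else:
        break
print(guesses)  # [500, 250, 375, 312, 343, 359, 351, 355]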
| 698 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class A__:
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=False , _lowercase=True , _lowercase=99 , _lowercase=32 , _lowercase=5 , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.0_2 , _lowercase=3 , _lowercase=4 , _lowercase=None , ) -> Tuple:
a_ : Union[str, Any] = parent
a_ : int = batch_size
a_ : int = seq_length
a_ : int = is_training
a_ : List[Any] = use_input_mask
a_ : str = use_token_type_ids
a_ : List[str] = use_labels
a_ : List[Any] = vocab_size
a_ : Dict = hidden_size
a_ : List[Any] = num_hidden_layers
a_ : str = num_attention_heads
a_ : str = intermediate_size
a_ : Optional[int] = hidden_act
a_ : Optional[int] = hidden_dropout_prob
a_ : int = attention_probs_dropout_prob
a_ : List[str] = max_position_embeddings
a_ : Optional[Any] = type_vocab_size
a_ : Any = type_sequence_label_size
a_ : List[Any] = initializer_range
a_ : List[str] = num_labels
a_ : Union[str, Any] = num_choices
a_ : Dict = scope
def UpperCamelCase__ ( self ) -> Any:
a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : Union[str, Any] = None
if self.use_input_mask:
a_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
a_ : List[Any] = None
if self.use_token_type_ids:
a_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a_ : Union[str, Any] = None
a_ : Dict = None
a_ : Any = None
if self.use_labels:
a_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ : int = ids_tensor([self.batch_size] , self.num_choices )
a_ : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ) -> Any:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , use_stable_embedding=_lowercase , )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict:
a_ : Optional[Any] = OpenLlamaModel(config=_lowercase )
model.to(_lowercase )
model.eval()
a_ : List[Any] = model(_lowercase , attention_mask=_lowercase )
a_ : Optional[Any] = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Union[str, Any]:
a_ : int = True
a_ : Union[str, Any] = OpenLlamaModel(_lowercase )
model.to(_lowercase )
model.eval()
a_ : Optional[int] = model(
_lowercase , attention_mask=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )
a_ : Any = model(
_lowercase , attention_mask=_lowercase , encoder_hidden_states=_lowercase , )
a_ : Optional[int] = model(_lowercase , attention_mask=_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Any:
a_ : Dict = OpenLlamaForCausalLM(config=_lowercase )
model.to(_lowercase )
model.eval()
a_ : int = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> List[str]:
a_ : Dict = True
a_ : Optional[int] = True
a_ : Dict = OpenLlamaForCausalLM(config=_lowercase )
model.to(_lowercase )
model.eval()
# first forward pass
a_ : List[str] = model(
_lowercase , attention_mask=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , use_cache=_lowercase , )
a_ : Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a_ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
a_ : Union[str, Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
a_ : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
a_ : Any = torch.cat([input_mask, next_mask] , dim=-1 )
a_ : Optional[int] = model(
_lowercase , attention_mask=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , output_hidden_states=_lowercase , )["""hidden_states"""][0]
a_ : Dict = model(
_lowercase , attention_mask=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , past_key_values=_lowercase , output_hidden_states=_lowercase , )["""hidden_states"""][0]
# select random slice
a_ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a_ : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
a_ : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1e-3 ) )
def UpperCamelCase__ ( self ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A__(a_, a_, a_, unittest.TestCase ):
"""simple docstring"""
_A : Tuple = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
_A : int = (OpenLlamaForCausalLM,) if is_torch_available() else ()
_A : Optional[int] = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : Any = False
def UpperCamelCase__ ( self ) -> List[str]:
a_ : Optional[int] = OpenLlamaModelTester(self )
a_ : Optional[int] = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def UpperCamelCase__ ( self ) -> Any:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) -> Optional[Any]:
a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def UpperCamelCase__ ( self ) -> Tuple:
a_ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a_ : Dict = type
self.model_tester.create_and_check_model(*_lowercase )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
a_ , a_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Any = 3
a_ : List[str] = input_dict["""input_ids"""]
a_ : List[str] = input_ids.ne(1 ).to(_lowercase )
a_ : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
a_ : int = OpenLlamaForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
a_ : Tuple = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self ) -> Optional[Any]:
a_ , a_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Union[str, Any] = 3
a_ : List[Any] = """single_label_classification"""
a_ : Dict = input_dict["""input_ids"""]
a_ : int = input_ids.ne(1 ).to(_lowercase )
a_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
a_ : Any = OpenLlamaForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
a_ : Any = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self ) -> Tuple:
a_ , a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Any = 3
a_ : List[str] = """multi_label_classification"""
a_ : Dict = input_dict["""input_ids"""]
a_ : Any = input_ids.ne(1 ).to(_lowercase )
a_ : List[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
a_ : Optional[int] = OpenLlamaForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
a_ : int = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def UpperCamelCase__ ( self ) -> Tuple:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 540 | 0 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
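`fire.Fire` exposes the function as a command line tool. A hypothetical invocation and an in-process equivalent (file names and contents are placeholders; `calculate_rouge` and `save_json` are the repo-local helpers imported above):

# Shell (hypothetical): python rouge_cli.py preds.txt targets.txt --save_path=rouge.json
from pathlib import Path

Path("preds.txt").write_text("hello there\n")
Path("targets.txt").write_text("hello there general kenobi\n")
metrics = calculate_rouge_path("preds.txt", "targets.txt", save_path="rouge.json")
print(metrics)  # e.g. {"rouge1": ..., "rouge2": ..., "rougeL": ...}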
| 701 |
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
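Two quick checks of the function above, traced by hand; note it returns a longest non-decreasing subsequence (the `>=` filters keep repeated values):

assert longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]) == [10, 22, 33, 41, 60, 80]
assert longest_subsequence([1, 1, 1]) == [1, 1, 1]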
| 440 | 0 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=32 , _UpperCamelCase=5 , _UpperCamelCase=4 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def UpperCamelCase( self ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase( self ):
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = NystromformerModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = NystromformerForMaskedLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = NystromformerForQuestionAnswering(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = NystromformerForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = NystromformerForTokenClassification(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = NystromformerForMultipleChoice(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( A__ , A__ , unittest.TestCase ):
__A : Tuple = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__A : Optional[Any] = (
{
"""feature-extraction""": NystromformerModel,
"""fill-mask""": NystromformerForMaskedLM,
"""question-answering""": NystromformerForQuestionAnswering,
"""text-classification""": NystromformerForSequenceClassification,
"""token-classification""": NystromformerForTokenClassification,
"""zero-shot""": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__A : Any = False
__A : Optional[Any] = False
def UpperCamelCase( self ):
_UpperCAmelCase = NystromformerModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 )
def UpperCamelCase( self ):
self.config_tester.run_common_tests()
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
@slow
def UpperCamelCase( self ):
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = NystromformerModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' )
_UpperCAmelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
_UpperCAmelCase = model(_UpperCamelCase )[0]
_UpperCAmelCase = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , _UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
def UpperCamelCase( self ):
_UpperCAmelCase = '''the [MASK] of Belgium is Brussels'''
_UpperCAmelCase = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' )
_UpperCAmelCase = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' )
_UpperCAmelCase = tokenizer(_UpperCamelCase , return_tensors='''pt''' )
with torch.no_grad():
_UpperCAmelCase = model(encoding.input_ids ).logits
_UpperCAmelCase = token_logits[:, 2, :].argmax(-1 )[0]
        self.assertEqual(tokenizer.decode(_UpperCamelCase ) , '''capital''' )
| 32 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
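A small instantiation sketch of the two classes above (values are illustrative):

config = EfficientNetConfig(image_size=600, width_coefficient=2.0)
print(config.model_type)         # "efficientnet"
print(config.num_hidden_layers)  # 64: sum([1, 2, 2, 3, 3, 4, 1]) * 4 with the defaults

onnx_config = EfficientNetOnnxConfig(config)
print(dict(onnx_config.inputs))  # {"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}}
print(onnx_config.atol_for_validation)  # 1e-05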
| 96 | 0 |
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
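With the lazy module in place, importing the tokenizer only materializes the submodule on first access. A brief sketch, assuming `transformers` is installed (the expected ids follow from ByT5's 3-token offset for pad/eos/unk):

from transformers import ByT5Tokenizer  # resolved lazily via _LazyModule

tok = ByT5Tokenizer()  # byte-level, so no vocab file is needed
ids = tok("hi").input_ids
print(ids)  # [107, 108, 1]: utf-8 bytes shifted by 3, plus the trailing </s>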
| 453 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : List[Any] = {
"tensor(bool)": np.bool_,
"tensor(int8)": np.inta,
"tensor(uint8)": np.uinta,
"tensor(int16)": np.intaa,
"tensor(uint16)": np.uintaa,
"tensor(int32)": np.intaa,
"tensor(uint32)": np.uintaa,
"tensor(int64)": np.intaa,
"tensor(uint64)": np.uintaa,
"tensor(float16)": np.floataa,
"tensor(float)": np.floataa,
"tensor(double)": np.floataa,
}
class OnnxRuntimeModel:
def __init__( self: Dict , _lowerCAmelCase: Union[str, Any]=None , **_lowerCAmelCase: Any ):
logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
lowercase :str = model
lowercase :Any = kwargs.get("model_save_dir" , _lowerCAmelCase )
lowercase :Dict = kwargs.get("latest_model_name" , _lowerCAmelCase )
def __call__( self: Optional[Any] , **_lowerCAmelCase: List[Any] ):
lowercase :List[str] = {k: np.array(_lowerCAmelCase ) for k, v in kwargs.items()}
return self.model.run(_lowerCAmelCase , _lowerCAmelCase )
@staticmethod
def SCREAMING_SNAKE_CASE ( _lowerCAmelCase: Union[str, Path] , _lowerCAmelCase: List[Any]=None , _lowerCAmelCase: List[Any]=None ):
if provider is None:
logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
lowercase :int = "CPUExecutionProvider"
return ort.InferenceSession(_lowerCAmelCase , providers=[provider] , sess_options=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: List[Any] , _lowerCAmelCase: Union[str, Path] , _lowerCAmelCase: Optional[str] = None , **_lowerCAmelCase: Any ):
lowercase :Optional[int] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
lowercase :Tuple = self.model_save_dir.joinpath(self.latest_model_name )
lowercase :Optional[int] = Path(_lowerCAmelCase ).joinpath(_lowerCAmelCase )
try:
shutil.copyfile(_lowerCAmelCase , _lowerCAmelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
lowercase :Tuple = self.model_save_dir.joinpath(_lowerCAmelCase )
if src_path.exists():
lowercase :Dict = Path(_lowerCAmelCase ).joinpath(_lowerCAmelCase )
try:
shutil.copyfile(_lowerCAmelCase , _lowerCAmelCase )
except shutil.SameFileError:
pass
def SCREAMING_SNAKE_CASE ( self: Optional[Any] , _lowerCAmelCase: Union[str, os.PathLike] , **_lowerCAmelCase: int , ):
if os.path.isfile(_lowerCAmelCase ):
logger.error(F"Provided path ({save_directory}) should be a directory, not a file" )
return
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
# saving model weights/files
self._save_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def SCREAMING_SNAKE_CASE ( cls: Dict , _lowerCAmelCase: Union[str, Path] , _lowerCAmelCase: Optional[Union[bool, str, None]] = None , _lowerCAmelCase: Optional[Union[str, None]] = None , _lowerCAmelCase: bool = False , _lowerCAmelCase: Optional[str] = None , _lowerCAmelCase: Optional[str] = None , _lowerCAmelCase: Optional[str] = None , _lowerCAmelCase: Optional["ort.SessionOptions"] = None , **_lowerCAmelCase: Optional[int] , ):
lowercase :List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_lowerCAmelCase ):
lowercase :Union[str, Any] = OnnxRuntimeModel.load_model(
os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , provider=_lowerCAmelCase , sess_options=_lowerCAmelCase )
lowercase :Optional[Any] = Path(_lowerCAmelCase )
# load model from hub
else:
# download model
lowercase :str = hf_hub_download(
repo_id=_lowerCAmelCase , filename=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , revision=_lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , )
lowercase :Optional[int] = Path(_lowerCAmelCase ).parent
lowercase :Tuple = Path(_lowerCAmelCase ).name
lowercase :Tuple = OnnxRuntimeModel.load_model(_lowerCAmelCase , provider=_lowerCAmelCase , sess_options=_lowerCAmelCase )
return cls(model=_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def SCREAMING_SNAKE_CASE ( cls: str , _lowerCAmelCase: Union[str, Path] , _lowerCAmelCase: bool = True , _lowerCAmelCase: Optional[str] = None , _lowerCAmelCase: Optional[str] = None , **_lowerCAmelCase: Any , ):
lowercase :List[str] = None
if len(str(_lowerCAmelCase ).split("@" ) ) == 2:
lowercase , lowercase :Tuple = model_id.split("@" )
return cls._from_pretrained(
model_id=_lowerCAmelCase , revision=_lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , **_lowerCAmelCase , )
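A hedged usage sketch of the wrapper above, assuming `onnxruntime` is installed and an ONNX checkpoint exists at the (placeholder) repo id; `from_pretrained` and `save_pretrained` are the diffusers method names that the obfuscated identifiers above correspond to:

import numpy as np

model = OnnxRuntimeModel.from_pretrained("some-user/some-onnx-model", provider="CPUExecutionProvider")

# __call__ converts every keyword argument to np.ndarray and runs the session;
# keyword names must match the ONNX graph's input names.
outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))

model.save_pretrained("./exported-onnx")  # copies model.onnx (and external weights, if any)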
| 453 | 1 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase_ (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowerCAmelCase__ =ProphetNetTokenizer
lowerCAmelCase__ =False
def __a ( self : str ):
"""simple docstring"""
super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __a ( self : Union[str, Any] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = "UNwant\u00E9d,running"
SCREAMING_SNAKE_CASE_ = "unwanted, running"
return input_text, output_text
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(__a , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [9, 6, 7, 12, 10, 11] )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __a ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=__a , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
SCREAMING_SNAKE_CASE_ = {}
for i, token in enumerate(__a ):
SCREAMING_SNAKE_CASE_ = i
SCREAMING_SNAKE_CASE_ = WordpieceTokenizer(vocab=__a , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
SCREAMING_SNAKE_CASE_ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
SCREAMING_SNAKE_CASE_ = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
SCREAMING_SNAKE_CASE_ = tokenizer(__a , padding=__a , return_tensors='pt' )
self.assertIsInstance(__a , __a )
SCREAMING_SNAKE_CASE_ = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__a , __a )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def __a ( self : int ):
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def __a ( self : Optional[int] ):
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def __a ( self : List[str] ):
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
SCREAMING_SNAKE_CASE_ = tokenizer.encode('sequence builders' , add_special_tokens=__a )
SCREAMING_SNAKE_CASE_ = tokenizer.encode('multi-sequence build' , add_special_tokens=__a )
SCREAMING_SNAKE_CASE_ = tokenizer.build_inputs_with_special_tokens(__a )
SCREAMING_SNAKE_CASE_ = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == text + [1_02]
        assert encoded_pair == text + [1_02] + text_a + [1_02]
| 360 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_snake_case = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
lowerCamelCase__ = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'A csv or a json file containing the training data.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'A csv or a json file containing the validation data.'})
lowerCamelCase__ = field(default=a , metadata={'help': 'A csv or a json file containing the test data.'})
def snake_case__ ( self):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
else:
_lowerCAmelCase : List[Any] = self.train_file.split(".")[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
_lowerCAmelCase : Optional[Any] = self.validation_file.split(".")[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
default=a , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
lowerCamelCase__ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = parser.parse_args_into_dataclasses()
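    # For illustration: invoking the script as `python <script>.py args.json` takes the
    # JSON branch above, while `python <script>.py --output_dir out --do_train ...`
    # fills the same dataclass fields from CLI flags.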
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
_lowerCAmelCase : Dict = training_args.get_process_log_level()
logger.setLevel(_lowerCamelCase )
datasets.utils.logging.set_verbosity(_lowerCamelCase )
transformers.utils.logging.set_verbosity(_lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
_lowerCAmelCase : Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowerCAmelCase : Optional[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
    # For JSON files, this script will use the `statement` column for the input statement and the `table_text` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
    # In distributed training, the load_dataset function guarantees that only one local process can
    # concurrently download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_lowerCAmelCase : str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_lowerCAmelCase : List[str] = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
_lowerCAmelCase : Optional[Any] = data_args.train_file.split("." )[-1]
_lowerCAmelCase : Optional[Any] = data_args.test_file.split("." )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
_lowerCAmelCase : Optional[Any] = data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`." )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith(".csv" ):
# Loading a dataset from local csv files
_lowerCAmelCase : str = load_dataset("csv" , data_files=_lowerCamelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_lowerCAmelCase : Tuple = load_dataset("json" , data_files=_lowerCamelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_lowerCAmelCase : List[Any] = raw_datasets["train"].features["label"].names
_lowerCAmelCase : str = len(_lowerCamelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
_lowerCAmelCase : int = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_lowerCamelCase , )
_lowerCAmelCase : Tuple = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
_lowerCAmelCase : Optional[Any] = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_lowerCAmelCase : List[str] = False
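    # For illustration: with `--pad_to_max_length` every example is padded once to
    # max_seq_length during preprocessing; otherwise the data collator chosen further
    # below pads each batch only to its own longest sequence.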
# Some models have set the order of the labels to use, so let's make sure we do use it.
_lowerCAmelCase : List[str] = {"Refused": 0, "Entailed": 1}
_lowerCAmelCase : List[str] = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
_lowerCAmelCase : Tuple = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(_lowerCamelCase ):
# Tokenize the texts
def _convert_table_text_to_pandas(_lowerCamelCase ):
_lowerCAmelCase : Tuple = [_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )]
_lowerCAmelCase : Any = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
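        # For illustration: a raw TabFact table_text such as "name#age\nalice#30\nbob#25"
        # becomes a DataFrame with columns ["name", "age"] and rows [["alice", "30"],
        # ["bob", "25"]], which the tokenizer then flattens alongside each statement.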
_lowerCAmelCase : str = examples["statement"]
_lowerCAmelCase : List[Any] = list(map(_convert_table_text_to_pandas , examples["table_text"] ) )
_lowerCAmelCase : List[Any] = tokenizer(_lowerCamelCase , _lowerCamelCase , padding=_lowerCamelCase , max_length=_lowerCamelCase , truncation=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = examples["label"]
return result
with training_args.main_process_first(desc="dataset map pre-processing" ):
_lowerCAmelCase : Tuple = raw_datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on dataset" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
_lowerCAmelCase : Optional[int] = raw_datasets["train"]
if data_args.max_train_samples is not None:
_lowerCAmelCase : str = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
_lowerCAmelCase : Dict = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_lowerCAmelCase : Dict = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset" )
_lowerCAmelCase : List[Any] = raw_datasets["test"]
if data_args.max_predict_samples is not None:
_lowerCAmelCase : Tuple = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(_lowerCamelCase ) ) , 3 ):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_lowerCamelCase ):
_lowerCAmelCase : Any = p.predictions[0] if isinstance(p.predictions , _lowerCamelCase ) else p.predictions
_lowerCAmelCase : List[Any] = np.argmax(_lowerCamelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_lowerCAmelCase : Any = default_data_collator
    elif training_args.fp16:
_lowerCAmelCase : Any = DataCollatorWithPadding(_lowerCamelCase , pad_to_multiple_of=8 )
else:
_lowerCAmelCase : Union[str, Any] = None
# Initialize our Trainer
_lowerCAmelCase : Optional[Any] = Trainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_lowerCamelCase , tokenizer=_lowerCamelCase , data_collator=_lowerCamelCase , )
# Training
if training_args.do_train:
_lowerCAmelCase : Dict = None
if training_args.resume_from_checkpoint is not None:
_lowerCAmelCase : str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowerCAmelCase : str = last_checkpoint
_lowerCAmelCase : List[Any] = trainer.train(resume_from_checkpoint=_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = train_result.metrics
_lowerCAmelCase : str = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowerCamelCase )
)
_lowerCAmelCase : Union[str, Any] = min(_lowerCamelCase , len(_lowerCamelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , _lowerCamelCase )
trainer.save_metrics("train" , _lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_lowerCAmelCase : Optional[int] = trainer.evaluate(eval_dataset=_lowerCamelCase )
_lowerCAmelCase : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowerCamelCase )
_lowerCAmelCase : List[Any] = min(_lowerCamelCase , len(_lowerCamelCase ) )
trainer.log_metrics("eval" , _lowerCamelCase )
trainer.save_metrics("eval" , _lowerCamelCase )
if training_args.do_predict:
logger.info("*** Predict ***" )
        # Removing the `label` column because it contains -1 and the Trainer won't like that.
_lowerCAmelCase : Optional[int] = predict_dataset.remove_columns("label" )
_lowerCAmelCase : Optional[Any] = trainer.predict(_lowerCamelCase , metric_key_prefix="predict" ).predictions
_lowerCAmelCase : Tuple = np.argmax(_lowerCamelCase , axis=1 )
_lowerCAmelCase : Union[str, Any] = os.path.join(training_args.output_dir , "predict_results_tabfact.txt" )
if trainer.is_world_process_zero():
with open(_lowerCamelCase , "w" ) as writer:
logger.info("***** Predict Results *****" )
writer.write("index\tprediction\n" )
for index, item in enumerate(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = label_list[item]
writer.write(F"{index}\t{item}\n" )
_lowerCAmelCase : Optional[Any] = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowerCamelCase )
else:
trainer.create_model_card(**_lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 500 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : int = logging.get_logger(__name__)
A_ : str = {
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class _lowercase ( UpperCAmelCase__ ):
_UpperCAmelCase = '''falcon'''
_UpperCAmelCase = ['''past_key_values''']
def __init__( self : int , __lowerCAmelCase : Tuple=6_5024 , __lowerCAmelCase : int=4544 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Union[str, Any]=71 , __lowerCAmelCase : Any=1E-5 , __lowerCAmelCase : Union[str, Any]=0.0_2 , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : Dict=11 , __lowerCAmelCase : Any=11 , **__lowerCAmelCase : Any , ) -> int:
"""simple docstring"""
a = vocab_size
# Backward compatibility with n_embed kwarg
a = kwargs.pop("n_embed" , __lowerCAmelCase )
a = hidden_size if n_embed is None else n_embed
a = num_hidden_layers
a = num_attention_heads
a = layer_norm_epsilon
a = initializer_range
a = use_cache
a = hidden_dropout
a = attention_dropout
a = bos_token_id
a = eos_token_id
a = num_attention_heads if num_kv_heads is None else num_kv_heads
a = alibi
a = new_decoder_architecture
a = multi_query # Ignored when new_decoder_architecture is True
a = parallel_attn
a = bias
super().__init__(bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
@property
def A ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.hidden_size // self.num_attention_heads
@property
def A ( self : Dict ) -> Tuple:
"""simple docstring"""
return not self.alibi
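# A minimal usage sketch of the two derived properties above (exposed as `head_dim`
# and `rotary` on the upstream FalconConfig; the numbers are illustrative):
#
#     config = FalconConfig(hidden_size=4544, num_attention_heads=71)
#     assert config.head_dim == 64  # 4544 // 71
#     assert config.rotary          # True whenever `alibi` is False (the default)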
| 708 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class _lowercase :
def __init__( self : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : int=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : str=99 , __lowerCAmelCase : List[str]=64 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Dict=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Optional[Any]=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Optional[Any]=0.0_2 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Union[str, Any]=None , ) -> List[str]:
"""simple docstring"""
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_input_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = embedding_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_labels
a = num_choices
a = scope
def A ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_input_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = None
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a = ids_tensor([self.batch_size] , self.num_choices )
a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : int ) -> List[str]:
"""simple docstring"""
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
def A ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
a = MobileBertModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
a = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
a = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Any ) -> str:
"""simple docstring"""
a = MobileBertForMaskedLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ) -> List[str]:
"""simple docstring"""
a = MobileBertForNextSentencePrediction(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
a = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def A ( self : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ) -> List[Any]:
"""simple docstring"""
a = MobileBertForPreTraining(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
a = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , next_sentence_label=__lowerCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def A ( self : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> Any:
"""simple docstring"""
a = MobileBertForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
a = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
a = self.num_labels
a = MobileBertForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ) -> Optional[Any]:
"""simple docstring"""
a = self.num_labels
a = MobileBertForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
a = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
a = self.num_choices
a = MobileBertForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self : List[Any] ) -> Dict:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
_UpperCAmelCase = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCAmelCase = (
{
'''feature-extraction''': MobileBertModel,
'''fill-mask''': MobileBertForMaskedLM,
'''question-answering''': MobileBertForQuestionAnswering,
'''text-classification''': MobileBertForSequenceClassification,
'''token-classification''': MobileBertForTokenClassification,
'''zero-shot''': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase = True
def A ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=False ) -> Any:
"""simple docstring"""
a = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class in get_values(__lowerCAmelCase ):
a = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase )
a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
return inputs_dict
def A ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
a = MobileBertModelTester(self )
a = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def A ( self : int ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : str ) -> Dict:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase )
def A ( self : str ) -> str:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase )
def A ( self : List[str] ) -> Dict:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase )
def A ( self : int ) -> Dict:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase )
def A ( self : List[Any] ) -> int:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase )
def A ( self : List[Any] ) -> Dict:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase )
def A ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase )
def A ( self : int ) -> Tuple:
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase )
def _long_tensor(tok_lst ):
    '''simple docstring'''
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
@slow
def A ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(torch_device )
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ] , device=torch_device , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # difference of ~1; it is therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result by the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
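# A reusable sketch of the ratio-bound check above (standalone helper; the name is
# illustrative):
def _close_within_ratio(expected, observed, tolerance=TOLERANCE):
    # elementwise: 1 - tolerance <= expected / observed <= 1 + tolerance
    ratio = expected / observed
    return bool(torch.all(ratio >= 1 - tolerance) and torch.all(ratio <= 1 + tolerance))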
| 32 | 0 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    def _get_tensors(self : Any , length : int ):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
    def test_list_criteria(self : Any ):
        input_ids , scores = self._get_tensors(5 )
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_length_criteria(self : List[str] ):
        criteria = MaxLengthCriteria(max_length=10 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_new_tokens_criteria(self : Optional[int] ):
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )
    def test_max_time_criteria(self : Any ):
        input_ids , scores = self._get_tensors(5 )
        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )
        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_validate_stopping_criteria(self : Any ):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(UserWarning ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(stopping_criteria ) , 1 )
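# A minimal sketch of how such criteria gate a decoding loop (standalone and
# illustrative; this is not the actual `generate()` implementation):
def _toy_decoding_loop(max_length=10):
    criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=max_length )] )
    input_ids = torch.zeros((1, 1) , dtype=torch.long )
    scores = torch.ones((1, 1) )
    while not criteria(input_ids , scores ):
        # append one dummy "generated" token per step
        input_ids = torch.cat([input_ids, torch.zeros((1, 1) , dtype=torch.long )] , dim=-1 )
    return input_ids.shape[-1]  # == max_length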
| 10 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "trocr"
UpperCAmelCase = ["past_key_values"]
UpperCAmelCase = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self : List[str] , _A : Optional[Any]=5_0265 , _A : Optional[Any]=1024 , _A : Optional[Any]=12 , _A : Any=16 , _A : Any=4096 , _A : Optional[Any]="gelu" , _A : Union[str, Any]=512 , _A : Dict=0.1 , _A : List[str]=0.0 , _A : Optional[Any]=0.0 , _A : Union[str, Any]=2 , _A : Any=0.02 , _A : List[str]=0.0 , _A : List[str]=True , _A : str=False , _A : List[str]=True , _A : Optional[Any]=True , _A : Optional[int]=1 , _A : int=0 , _A : Any=2 , **_A : Optional[int] , ):
_UpperCamelCase = vocab_size
_UpperCamelCase = d_model
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = activation_function
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = init_std
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = use_cache
_UpperCamelCase = scale_embedding
_UpperCamelCase = use_learned_position_embeddings
_UpperCamelCase = layernorm_embedding
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , decoder_start_token_id=_A , **_A , )
| 10 | 1 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self, __a, __a=13, __a=30, __a=2, __a=3, __a=True, __a=True, __a=32, __a=5, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=10, __a=0.02, ):
'''simple docstring'''
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : Tuple = batch_size
_lowerCAmelCase : Dict = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : Any = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[str] = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : List[Any] = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : Any = type_sequence_label_size
_lowerCAmelCase : Optional[int] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : Dict = (image_size // patch_size) ** 2
_lowerCAmelCase : List[Any] = num_patches + 1
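        # e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, plus the [CLS]
        # token, gives a sequence length of 226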
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCAmelCase : List[Any] = ViTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__a, initializer_range=self.initializer_range, )
return config, pixel_values
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = FlaxViTModel(config=__a)
_lowerCAmelCase : Dict = model(__a)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : Optional[int] = (self.image_size, self.image_size)
_lowerCAmelCase : Any = (self.patch_size, self.patch_size)
_lowerCAmelCase : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.type_sequence_label_size
_lowerCAmelCase : Optional[Any] = FlaxViTForImageClassification(config=__a)
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
# test greyscale images
_lowerCAmelCase : Any = 1
_lowerCAmelCase : List[str] = FlaxViTForImageClassification(__a)
_lowerCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_lowerCAmelCase : str = model(__a)
def snake_case__ ( self):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = FlaxViTModelTester(self)
_lowerCAmelCase : Union[str, Any] = ConfigTester(self, config_class=__a, has_text_modality=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Tuple = model_class(__a)
_lowerCAmelCase : Optional[Any] = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1], __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_lowerCAmelCase : int = self._prepare_for_class(__a, __a)
_lowerCAmelCase : Union[str, Any] = model_class(__a)
@jax.jit
def model_jitted(__a, **__a):
return model(pixel_values=__a, **__a)
with self.subTest("JIT Enabled"):
_lowerCAmelCase : str = model_jitted(**__a).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
_lowerCAmelCase : Union[str, Any] = model_jitted(**__a).to_tuple()
self.assertEqual(len(__a), len(__a))
for jitted_output, output in zip(__a, __a):
self.assertEqual(jitted_output.shape, output.shape)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class_name.from_pretrained("google/vit-base-patch16-224")
_lowerCAmelCase : Union[str, Any] = model(np.ones((1, 3, 224, 224)))
self.assertIsNotNone(__a)
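# A minimal standalone sketch of the jit-vs-eager comparison pattern exercised above
# (the toy function is illustrative):
def _toy_jit_check():
    @jax.jit
    def double(x):
        return x * 2
    jitted = double(np.ones((2, 2)))
    with jax.disable_jit():
        eager = double(np.ones((2, 2)))
    return jitted.shape == eager.shape  # True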
| 658 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'upernet'
def __init__( self, __a=None, __a=512, __a=0.02, __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=384, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
_lowerCAmelCase : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
elif isinstance(__a, __a):
_lowerCAmelCase : List[Any] = backbone_config.get("model_type")
_lowerCAmelCase : Dict = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase : Optional[Any] = config_class.from_dict(__a)
_lowerCAmelCase : Tuple = backbone_config
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : str = pool_scales
_lowerCAmelCase : List[str] = use_auxiliary_head
_lowerCAmelCase : Dict = auxiliary_loss_weight
_lowerCAmelCase : Tuple = auxiliary_in_channels
_lowerCAmelCase : Optional[Any] = auxiliary_channels
_lowerCAmelCase : str = auxiliary_num_convs
_lowerCAmelCase : Union[str, Any] = auxiliary_concat_input
_lowerCAmelCase : Dict = loss_ignore_index
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = copy.deepcopy(self.__dict__)
_lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
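# A minimal construction sketch (mirroring the upstream UperNetConfig API; the
# backbone dict is illustrative, and any config with a registered `model_type`
# resolves the same way):
#
#     config = UperNetConfig(backbone_config={"model_type": "resnet", "out_features": ["stage1", "stage4"]})
#     assert config.to_dict()["backbone_config"]["model_type"] == "resnet"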
| 658 | 1 |
'''simple docstring'''
from math import factorial
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ = 20 ):
    # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
    __a : int = 2 * SCREAMING_SNAKE_CASE__
    __a_half : int = __a // 2
    return int(factorial(__a ) / (factorial(__a_half ) * factorial(__a - __a_half )) )
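# The closed form above is the central binomial coefficient: an n x n lattice admits
# C(2n, n) = (2n)! / (n!)^2 monotone paths, so the default n = 20 yields
# C(40, 20) = 137846528820.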
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
SCREAMING_SNAKE_CASE_ = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
| 597 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE_ = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["BeitFeatureExtractor"]
SCREAMING_SNAKE_CASE_ = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 597 | 1 |
import random
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool:
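    # write num - 1 as 2**t * s with s odd; each random witness below is checked
    # against this decomposition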
_lowercase : Tuple = num - 1
_lowercase : Tuple = 0
while s % 2 == 0:
_lowercase : Tuple = s // 2
t += 1
for _ in range(5 ):
_lowercase : Dict = random.randrange(2 , num - 1 )
_lowercase : List[Any] = pow(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if v != 1:
_lowercase : List[str] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
_lowercase : Tuple = i + 1
_lowercase : Any = (v**2) % num
return True
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool:
if num < 2:
return False
    _lowercase : Union[str, Any] = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
        31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113,
        127, 131, 137, 139, 149, 151, 157, 163, 167, 173,
        179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
        233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
        283, 293, 307, 311, 313, 317, 331, 337, 347, 349,
        353, 359, 367, 373, 379, 383, 389, 397, 401, 409,
        419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541,
        547, 557, 563, 569, 571, 577, 587, 593, 599, 601,
        607, 613, 617, 619, 631, 641, 643, 647, 653, 659,
        661, 673, 677, 683, 691, 701, 709, 719, 727, 733,
        739, 743, 751, 757, 761, 769, 773, 787, 797, 809,
        811, 821, 823, 827, 829, 839, 853, 857, 859, 863,
        877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997,
    ]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(__UpperCamelCase )
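# Note: pairing cheap trial division (above) with Miller-Rabin keeps key generation
# fast, since most composite candidates are rejected before any modular
# exponentiation runs.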
def __magic_name__ ( SCREAMING_SNAKE_CASE = 1_024 ) -> int:
while True:
_lowercase : Optional[Any] = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(__UpperCamelCase ):
return num
if __name__ == "__main__":
UpperCamelCase = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))
| 713 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
for attribute in key.split(""".""" ):
__lowercase = getattr(lowerCamelCase , lowerCamelCase )
if weight_type is not None:
__lowercase = getattr(lowerCamelCase , lowerCamelCase ).shape
else:
__lowercase = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
__lowercase = value
elif weight_type == "weight_g":
__lowercase = value
elif weight_type == "weight_v":
__lowercase = value
elif weight_type == "bias":
__lowercase = value
else:
__lowercase = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = []
__lowercase = fairseq_model.state_dict()
__lowercase = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__lowercase = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
__lowercase = True
else:
for key, mapped_key in MAPPING.items():
__lowercase = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__lowercase = True
if "*" in mapped_key:
__lowercase = name.split(lowerCamelCase )[0].split(""".""" )[-2]
__lowercase = mapped_key.replace("""*""" , lowerCamelCase )
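                    # e.g. for "encoder.layers.3.fc1.weight" the captured layer index "3"
                    # replaces "*", giving "encoder.layers.3.feed_forward.intermediate_dense"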
if "weight_g" in name:
__lowercase = """weight_g"""
elif "weight_v" in name:
__lowercase = """weight_v"""
elif "weight" in name:
__lowercase = """weight"""
elif "bias" in name:
__lowercase = """bias"""
else:
__lowercase = None
set_recursively(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
continue
if not is_used:
unused_weights.append(lowerCamelCase )
logger.warning(F'Unused weights: {unused_weights}' )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = full_name.split("""conv_layers.""" )[-1]
__lowercase = name.split(""".""" )
__lowercase = int(items[0] )
__lowercase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
__lowercase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
__lowercase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
__lowercase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
__lowercase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowerCamelCase )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = SEWConfig()
if is_finetuned:
__lowercase = model.wav_encoder.wav_model.cfg
else:
__lowercase = model.cfg
__lowercase = fs_config.conv_bias
__lowercase = eval(fs_config.conv_feature_layers )
__lowercase = [x[0] for x in conv_layers]
__lowercase = [x[1] for x in conv_layers]
__lowercase = [x[2] for x in conv_layers]
__lowercase = """gelu"""
__lowercase = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
__lowercase = 0.0
__lowercase = fs_config.activation_fn.name
__lowercase = fs_config.encoder_embed_dim
__lowercase = 0.02
__lowercase = fs_config.encoder_ffn_embed_dim
__lowercase = 1e-5
__lowercase = fs_config.encoder_layerdrop
__lowercase = fs_config.encoder_attention_heads
__lowercase = fs_config.conv_pos_groups
__lowercase = fs_config.conv_pos
__lowercase = len(lowerCamelCase )
__lowercase = fs_config.encoder_layers
__lowercase = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
__lowercase = model.cfg
__lowercase = fs_config.final_dropout
__lowercase = fs_config.layerdrop
__lowercase = fs_config.activation_dropout
__lowercase = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
__lowercase = fs_config.attention_dropout
__lowercase = fs_config.dropout_input
__lowercase = fs_config.dropout
__lowercase = fs_config.mask_channel_length
__lowercase = fs_config.mask_channel_prob
__lowercase = fs_config.mask_length
__lowercase = fs_config.mask_prob
__lowercase = """Wav2Vec2FeatureExtractor"""
__lowercase = """Wav2Vec2CTCTokenizer"""
return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    '''simple docstring'''
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    # NOTE: the WavaVeca* class names below follow this file's imports; they are
    # assumed to alias transformers' Wav2Vec2FeatureExtractor / Wav2Vec2CTCTokenizer / Wav2Vec2Processor.
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    recursively_load_weights(model, hf_model, is_finetuned)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
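# Example invocation (hedged: the script file name and paths below are
# hypothetical placeholders, not real checkpoints):
#
#   python convert_sew_checkpoint.py \
#       --checkpoint_path /path/to/sew_checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./sew-hf \
#       --is_finetuned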
| 80 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        """simple docstring"""
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """simple docstring"""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''

    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
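# A short usage sketch of the processor exercised above (hedged: shapes follow
# the default tester configuration; needs only Pillow and numpy, no weights):
#
#   from PIL import Image
#   import numpy as np
#   from transformers import DPTImageProcessor
#
#   processor = DPTImageProcessor(size={"height": 18, "width": 18})
#   dummy = Image.fromarray(np.zeros((30, 30, 3), dtype=np.uint8))
#   pixel_values = processor(dummy, return_tensors="pt").pixel_values
#   assert pixel_values.shape == (1, 3, 18, 18)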
| 11 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        """simple docstring"""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True)
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        """simple docstring"""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy")
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        """simple docstring"""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy")
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        """simple docstring"""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np")
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
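# For reference, a minimal out-of-test inpainting call follows the same shape
# as the integration tests above (hedged sketch: it downloads the full model
# weights and needs a GPU, so it is left as a comment):
#
#   pipe = StableDiffusionInpaintPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
#   )
#   pipe.to("cuda")
#   result = pipe(prompt="a red couch", image=init_image, mask_image=mask_image).images[0]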
| 709 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """simple docstring"""
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }
    predefined_args = bort_4_8_768_1024_hparams
    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"], num_layers=predefined_args["num_layers"], units=predefined_args["units"], hidden_size=predefined_args["hidden_size"], max_length=predefined_args["max_length"], num_heads=predefined_args["num_heads"], scaled=predefined_args["scaled"], dropout=predefined_args["dropout"], output_attention=False, output_all_encodings=False, use_residual=predefined_args["use_residual"], activation=predefined_args.get("activation", "gelu"), layer_norm_eps=predefined_args.get("layer_norm_eps", None))
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"
    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)
    original_bort = nlp.model.BERTModel(
        encoder, len(bort_vocab), units=predefined_args["units"], embed_size=predefined_args["embed_size"], embed_dropout=predefined_args["embed_dropout"], word_embed=predefined_args["word_embed"], use_pooler=False, use_token_type_embed=False, token_type_vocab_size=predefined_args["token_type_vocab_size"], use_classifier=False, use_decoder=False)
    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight")
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight")
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta")
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma")
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data)
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias")
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight")
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias")
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight")
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias")
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight")

        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias")
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight")
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta")
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma")

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias")
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight")

        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias")
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight")
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta")
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma")
    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
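# A self-contained sketch of the shape-checked copy pattern that
# `check_and_map_params` applies above (hypothetical helper, plain torch only):
def _copy_checked(hf_param: nn.Parameter, source: torch.Tensor) -> nn.Parameter:
    # Refuse silently-wrong conversions: shapes must match exactly.
    assert hf_param.shape == source.shape, f"shape mismatch: {hf_param.shape} vs {source.shape}"
    return nn.Parameter(source.clone())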
| 163 | 0 |
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    '''simple docstring'''

    def __init__(self, key: KT | str = "root", value: VT | None = None):
        '''simple docstring'''
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        '''simple docstring'''
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        '''simple docstring'''
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    '''simple docstring'''

    def __init__(self, p: float = 0.5, max_level: int = 16):
        '''simple docstring'''
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        '''simple docstring'''
        items = list(self)
        if len(items) == 0:
            return f"SkipList(level={self.level})"
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards))
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward
        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        '''simple docstring'''
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        '''simple docstring'''
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key):
        '''simple docstring'''
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        '''simple docstring'''
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        '''simple docstring'''
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT):
        '''simple docstring'''
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
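# Note on `random_level` above: with promotion probability p, a node reaches
# level k with probability p ** (k - 1) * (1 - p), so node levels follow a
# geometric distribution and roughly n * p ** (k - 1) nodes survive to level k.
# That thinning is what gives `_locate_node` its expected O(log n) search path.
def _skip_list_usage_demo() -> None:
    # A small usage sketch (not executed on import): expected O(log n)
    # insert / find / delete on top of the class above.
    skip_list = SkipList[str, int]()
    skip_list.insert("Key1", 1)
    skip_list.insert("Key2", 2)
    assert skip_list.find("Key1") == 1
    skip_list.delete("Key1")
    assert skip_list.find("Key1") is None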
def test_insert() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none() -> None:
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search() -> None:
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing() -> None:
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method() -> None:
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key() -> None:
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes() -> None:
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values() -> None:
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests() -> None:
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main() -> None:
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 100 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char_code = ord(char)
        if not _is_chinese_char(char_code):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            span = min(end - start, max_word_len)
            for i in range(span, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
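# Worked example (hand-traced): with chinese_word_set == {"身高"} and
# bert_tokens == ["我", "的", "身", "高"], add_sub_symbol returns
# ["我", "的", "身", "##高"]: only the trailing characters of a matched whole
# word receive the "##" continuation prefix.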
def prepare_ref(lines: List[str], ltp_tokenizer, bert_tokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    args = parser.parse_args()
main(args)
| 497 | 0 |
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
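# Hand-computed sanity check: phi(2..8) = 1, 2, 2, 4, 2, 6, 4, which sums to 21.
assert solution(8) == 21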
if __name__ == "__main__":
print(F"""{solution() = }""")
| 248 |
"""simple docstring"""
import base64


def base64_encode(string: str) -> bytes:
    # The snippet's mangled "baseaa"/"baaencode" names are assumed to stand for
    # the standard library's base64 / b64encode.
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded: bytes) -> str:
    return base64.b64decode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
    print(decoded)
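    # Round-trip sanity check: decoding the encoding recovers the original text.
    assert base64_decode(encoded) == test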
| 248 | 1 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    '''simple docstring'''

    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False, metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False, metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False, metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False, metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None, metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."})
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    '''simple docstring'''

    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][(int(row["batch_size"]), int(row["sequence_length"]))] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][(int(row["batch_size"]), int(row["sequence_length"]))] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results], dtype=int)
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results], dtype=np.float32)

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}")
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
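# Expected input schema (inferred from the csv reader above): one row per
# measurement, for example:
#
#   model,batch_size,sequence_length,result
#   bert-base-uncased,8,128,0.012
#   bert-base-uncased,8,512,0.052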
| 503 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ):
_A : Any = tmp_path / "cache"
_A : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_A : List[str] = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] ):
_A : Any = tmp_path / "cache"
_A : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_A : List[str] = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , split=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _UpperCAmelCase (UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any ):
if issubclass(UpperCamelCase__ , UpperCamelCase__ ):
_A : Optional[int] = parquet_path
elif issubclass(UpperCamelCase__ , UpperCamelCase__ ):
_A : Optional[int] = [parquet_path]
_A : Dict = tmp_path / "cache"
_A : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_A : Tuple = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_dataset(UpperCamelCase__ , UpperCamelCase__ )
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ):
_A : Tuple = tmp_path / "cache"
_A : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_A : List[str] = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _UpperCAmelCase (UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if split:
_A : Any = {split: parquet_path}
else:
_A : Optional[Any] = "train"
_A : int = {"train": parquet_path, "test": parquet_path}
_A : Any = tmp_path / "cache"
_A : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_A : Dict = ParquetDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_parquet_datasetdict(UpperCamelCase__ , UpperCamelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
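# A minimal round-trip sketch outside of pytest (hedged: it writes to a local
# file path chosen by the caller, so it is left as a comment):
#
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [0.1, 0.2]})
#   ParquetDatasetWriter(ds, "example.parquet").write()
#   reloaded = ParquetDatasetReader("example.parquet").read()
#   assert reloaded.column_names == ds.column_names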
| 503 | 1 |
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """simple docstring"""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F'''{solution() = }''')
| 710 | """simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
__lowerCamelCase = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
__lowerCamelCase = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
__lowerCamelCase = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(pred_label, label, num_labels, ignore_index: int, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    """Compute the per-class intersection and union areas for a single segmentation map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    # drop the pixels that should be ignored during evaluation
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
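# Worked example for the helper above (toy values, not from the original file): with
# pred_label = [[0, 1], [1, 1]], label = [[0, 1], [0, 1]], num_labels = 2 and
# ignore_index = 255, class 1 gets intersect = 2, pred area = 3 and label area = 2,
# hence union = 3 + 2 - 2 = 3 and a per-category IoU of 2 / 3.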
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index: int, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    """Accumulate the per-class areas over a whole dataset of segmentation maps."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index: int, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
    """Compute mean IoU, mean accuracy and overall accuracy from the accumulated areas."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(self, predictions, references, num_labels: int, ignore_index: int, nan_to_num: Optional[int] = None, label_map: Optional[Dict[int, int]] = None, reduce_labels: bool = False):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
| 536 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(self, vocab_size=42_384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0, activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
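# Minimal usage sketch (attribute values follow from the defaults above; the override
# is purely illustrative):
#
#     config = BioGptConfig(num_hidden_layers=12)  # override a single default
#     config.hidden_size   # -> 1024
#     config.pad_token_id  # -> 1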
| 428 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2_048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or []
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
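    # Worked example of the offset above (ids read off the comment table, not
    # re-verified against the released spm model): spm assigns "," the id 3, so
    # _convert_token_to_id(",") returns 3 + self.fairseq_offset == 4, which is the
    # position "," occupies in the fairseq vocabulary.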
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
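# Minimal usage sketch (assumes the facebook/xglm-564M files can be downloaded; the
# call pattern is the standard PreTrainedTokenizer API rather than anything specific
# to this file):
#
#     tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#     ids = tokenizer("Hello world").input_ids  # sep id prepended, per build_inputs_with_special_tokens
#     text = tokenizer.decode(ids)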
| 40 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
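# Shape sketch for the helper above (sizes are the tester defaults and purely
# illustrative): given input_ids of shape (13, 7), attention_mask comes back as a
# (13, 7) int8 tensor of 0/1 pad flags, and each default head mask is an all-ones
# tensor of shape (num_layers, num_attention_heads).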
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 700 |
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp


FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class Speech2TextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)

    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [289, 50, 14, 174, 386], )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."], )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_UpperCamelCase : Optional[int] = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCamelCase, model_name="facebook/s2t-small-mustc-en-de-st", revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad", )
@require_sentencepiece
class Speech2TextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def test_lang_code_to_id(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)
    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10_000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
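# Behaviour exercised by the last two tests, written out (ids come straight from the
# assertions above): after `tokenizer.tgt_lang = "fr"`, every encoded sequence is
# prefixed with FR_CODE, so tokenizer("C'est trop cool").input_ids starts with 5 and
# ends with tokenizer.eos_token_id.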
| 648 | 0 |