import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt"
        ),
        "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
        "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-openqa": (
            "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-reader": (
            "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-openqa": (
            "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-reader": (
            "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-reader": {"do_lower_case": True},
    "google/realm-orqa-wq-openqa": {"do_lower_case": True},
    "google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" REALM tokenizer (backed by the HuggingFace tokenizers library), based on WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its saved settings disagree with the arguments.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of text or text-pair candidates, always padded to `max_length`."""
        # Always pad to a fixed length so the candidates of each example can be stacked.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
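# A minimal usage sketch of `batch_encode_candidates` (added; the candidate
# strings are made up, and the shape follows from padding every candidate to
# `max_length`):
#
#     >>> tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#     >>> text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
#     >>> enc = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="np")
#     >>> enc["input_ids"].shape
#     (2, 2, 10)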
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers.models.layoutlm.modeling_tf_layoutlm import (
        TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFLayoutLMForMaskedLM,
        TFLayoutLMForQuestionAnswering,
        TFLayoutLMForSequenceClassification,
        TFLayoutLMForTokenClassification,
        TFLayoutLMModel,
    )
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass


def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels


@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])

        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)

        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
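# Note (added for illustration): with the `_LazyModule` pattern above, a statement
# like `from transformers.models.graphormer import GraphormerModel` only imports
# `modeling_graphormer` (and hence torch) on first attribute access, which keeps
# `import transformers` itself cheap.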
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find the set of distinct prime factors of an integer via trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors
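# Quick sanity checks (added for illustration):
#     unique_prime_factors(100) == {2, 5}      # 100 = 2**2 * 5**2
#     unique_prime_factors(644) == {2, 7, 23}  # 644 = 2**2 * 7 * 23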
@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors for a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check that ALL elements of an iterable are equal."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first group of `n` consecutive integers with `n` unique prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the group found by `run`."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
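# Expected result (added for reference): for the default n=4, solution() should
# return 134043, the first of four consecutive integers with four distinct prime
# factors each (Project Euler problem 47).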
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""Calculate the nth Proth number."""

import math


def proth(number: int) -> int:
    """Return the `number`-th Proth number (1-indexed)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers come in blocks between consecutive powers of two;
        # compute how many blocks are needed to reach the requested index.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]
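# Sanity check (added for illustration, not in the original file): Proth numbers
# have the form k * 2**n + 1 with k odd and k < 2**n, so the sequence starts
# 3, 5, 9, 13, 17, 25, 33, 41, ... and e.g. proth(6) == 25.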
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
_lowerCamelCase : Optional[int] = 0
try:
_lowerCamelCase : int = proth(number)
except ValueError:
print(f"ValueError: there is no {number}th Proth number")
continue
print(f"The {number}th Proth number: {value}")
"""Tokenization classes for CodeGen."""

import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union

import numpy as np

from ...utils import is_tf_available, is_torch_available, logging


if TYPE_CHECKING:
    if is_torch_available():
        import torch
    if is_tf_available():
        import tensorflow as tf

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}


class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" CodeGen tokenizer (backed by the HuggingFace tokenizers library), based on byte-level BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005,"
                " so that the fast tokenizer works correctly."
            )

        # Rebuild the backend pre-tokenizer if its setting disagrees with `add_prefix_space`.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
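# A minimal usage sketch (added; not part of the original module). `truncate`
# cuts a completion at the first match of any multiline regex, which is handy
# for stopping generated code at the next top-level statement; `generated_ids`
# below is a placeholder for ids produced by a CodeGen model:
#
#     >>> tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#     >>> text = tokenizer.decode(generated_ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])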
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Return a mapping from utf-8 bytes to unicode strings. Reversible BPE codes work on unicode strings, so every
    byte the vocabulary can see needs a printable representation; bytes that would render as whitespace/control
    characters are remapped to unused code points.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
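# Example of the mapping (added for illustration): printable bytes map to
# themselves, e.g. bytes_to_unicode()[ord("a")] == "a", while bytes that would
# print as whitespace/control characters are shifted to unused code points,
# e.g. bytes_to_unicode()[ord(" ")] == "Ġ" (U+0120).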
# Copied from transformers.models.bart.tokenization_bart.get_pairs
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where the word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
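# Example (added for illustration):
#     get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}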
class LEDTokenizer(PreTrainedTokenizer):
    """Construct a LED tokenizer, based on byte-level Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
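# A minimal usage sketch (added; not part of the original module), showing why
# `_pad` is overridden: `global_attention_mask` must stay aligned with
# `input_ids` after padding, and padded positions get -1 because 0 already
# means "local attention" for LED:
#
#     >>> tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#     >>> enc = tokenizer("a long document ...")
#     >>> enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)  # global attention on <s>
#     >>> padded = tokenizer.pad(enc, padding="max_length", max_length=32)
#     >>> # padded["global_attention_mask"] now ends in -1s at the padded positions.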
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class UpperCAmelCase_ :
def __init__( self : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : Dict=1_0 , UpperCAmelCase__ : Any=3 , UpperCAmelCase__ : Tuple=3_2 * 8 , UpperCAmelCase__ : Tuple=3_2 * 8 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : Optional[int]=6_4 , ) -> Optional[Any]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = is_training
lowerCAmelCase = use_auxiliary_loss
lowerCAmelCase = num_queries
lowerCAmelCase = num_channels
lowerCAmelCase = min_size
lowerCAmelCase = max_size
lowerCAmelCase = num_labels
lowerCAmelCase = hidden_dim
lowerCAmelCase = hidden_dim
def __UpperCAmelCase ( self : Tuple ) -> Dict:
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
UpperCAmelCase__ )
lowerCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCAmelCase__ )
lowerCAmelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCAmelCase__ ) > 0.5
).float()
lowerCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=UpperCAmelCase__ ) > 0.5).long()
lowerCAmelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
lowerCAmelCase = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
lowerCAmelCase = self.num_queries
lowerCAmelCase = self.num_labels
lowerCAmelCase = [1, 1, 1, 1]
lowerCAmelCase = self.num_channels
lowerCAmelCase = 6_4
lowerCAmelCase = 1_2_8
lowerCAmelCase = self.hidden_dim
lowerCAmelCase = self.hidden_dim
lowerCAmelCase = self.hidden_dim
return config
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] ) -> List[Any]:
lowerCAmelCase = output.encoder_hidden_states
lowerCAmelCase = output.pixel_decoder_hidden_states
lowerCAmelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(UpperCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCAmelCase__ ) , config.decoder_layers )
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple=False ) -> Dict:
with torch.no_grad():
lowerCAmelCase = MaskaFormerModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(pixel_values=UpperCAmelCase__ , pixel_mask=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple ) -> Optional[int]:
lowerCAmelCase = MaskaFormerForUniversalSegmentation(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
def comm_check_on_output(UpperCAmelCase__ : Optional[int] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCAmelCase = model(pixel_values=UpperCAmelCase__ , pixel_mask=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ )
comm_check_on_output(UpperCAmelCase__ )
lowerCAmelCase = model(
pixel_values=UpperCAmelCase__ , pixel_mask=UpperCAmelCase__ , mask_labels=UpperCAmelCase__ , class_labels=UpperCAmelCase__ )
comm_check_on_output(UpperCAmelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
lowerCamelCase : List[str] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCamelCase : List[str] = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
lowerCamelCase : int = False
lowerCamelCase : List[Any] = False
lowerCamelCase : str = False
lowerCamelCase : int = False
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
lowerCAmelCase = MaskaFormerModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(UpperCAmelCase__ , **UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*UpperCAmelCase__ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def __UpperCAmelCase ( self : Tuple ) -> Any:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
pass
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(UpperCAmelCase__ )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowerCAmelCase = MaskaFormerModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
lowerCAmelCase = (self.model_tester.min_size,) * 2
lowerCAmelCase = {
'pixel_values': torch.randn((2, 3, *size) , device=UpperCAmelCase__ ),
'mask_labels': torch.randn((2, 1_0, *size) , device=UpperCAmelCase__ ),
'class_labels': torch.zeros(2 , 1_0 , device=UpperCAmelCase__ ).long(),
}
lowerCAmelCase = self.model_tester.get_config()
lowerCAmelCase = MaskaFormerForUniversalSegmentation(UpperCAmelCase__ ).to(UpperCAmelCase__ )
lowerCAmelCase = model(**UpperCAmelCase__ )
self.assertTrue(outputs.loss is not None )
def __UpperCAmelCase ( self : Optional[int] ) -> int:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(UpperCAmelCase__ , **UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(UpperCAmelCase__ ).to(UpperCAmelCase__ )
lowerCAmelCase = model(**UpperCAmelCase__ , output_attentions=UpperCAmelCase__ )
self.assertTrue(outputs.attentions is not None )
def __UpperCAmelCase ( self : int ) -> str:
if not self.model_tester.is_training:
return
lowerCAmelCase = self.all_model_classes[1]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.train()
lowerCAmelCase = model(UpperCAmelCase__ , mask_labels=UpperCAmelCase__ , class_labels=UpperCAmelCase__ ).loss
loss.backward()
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
lowerCAmelCase = self.all_model_classes[1]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = model_class(UpperCAmelCase__ ).to(UpperCAmelCase__ )
model.train()
lowerCAmelCase = model(UpperCAmelCase__ , mask_labels=UpperCAmelCase__ , class_labels=UpperCAmelCase__ )
lowerCAmelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowerCAmelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCAmelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__snake_case =1e-4
def a_ ( ):
lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )
        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
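# Note (hedged): the integration test class above is gated behind @slow, so it
# only runs when slow tests are enabled, e.g. RUN_SLOW=1 python -m pytest
# <path-to-this-test-file>; the concrete file path is not shown in this snippet.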
| 55 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own arguments plus the trailing training-script args."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
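# Example invocation (hedged: the launcher and training-file names are assumed):
#   python xla_spawn.py --num_cores 8 run_glue.py --learning_rate 3e-5
# The launcher imports the training file as a module and hands its `_mp_fn`
# entry point to torch_xla's xmp.spawn, once per requested TPU core.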
| 55 | 1 |
'''simple docstring'''
class DisjointSet:
    """Union-find over indexed sets that tracks the size of the largest set."""

    def __init__(self, set_counts):
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src, dst):
        """Merge the sets containing src and dst; return False if already joined."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set):
        """Find the root of disj_set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
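# Usage sketch (hedged, values chosen for illustration): three sets of sizes
# [1, 1, 2]; merging elements 0 and 1 joins their singleton sets into one of
# size 2, so max_set stays 2.
#   ds = DisjointSet([1, 1, 2])
#   assert ds.merge(0, 1) is True
#   assert ds.max_set == 2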
| 97 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    """Return the set of adjacent symbol pairs found in `word`."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}


class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges file provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)

        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: list) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
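# Toy walk-through of `bpe` above (merges are assumed, not from a real vocab):
# with bpe_ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}, the token "low" becomes
# ("l", "o", "w</w>") -> ("lo", "w</w>") -> ("low</w>",); the trailing "</w>"
# marker is then stripped, so bpe("low") returns "low".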
| 65 | 0 |
"""simple docstring"""
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operations."""
    while second != 0:
        carry = first & second  # bits that would generate a carry
        first ^= second  # sum without the carry
        second = carry << 1  # shift the carry into position
    return first
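# Worked example of the loop above (not part of the original file): add(3, 5)
#   first=3 (0b011), second=5 (0b101): carry=1, first=6, second=2
#   first=6 (0b110), second=2 (0b010): carry=2, first=4, second=4
#   first=4 (0b100), second=4 (0b100): carry=4, first=0, second=8
#   first=0,         second=8:         carry=0, first=8, second=0 -> returns 8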
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")

| 362 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

| 239 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _a ( UpperCamelCase__ ):
def lowerCamelCase_ ( self: Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase_ , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(UpperCamelCase_ , '''depth_multiplier''' ) )
class _a :
def __init__( self: List[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: List[Any]=13 , UpperCamelCase_: str=3 , UpperCamelCase_: Dict=32 , UpperCamelCase_: Optional[int]=0.25 , UpperCamelCase_: List[Any]=8 , UpperCamelCase_: List[str]=8 , UpperCamelCase_: str=6 , UpperCamelCase_: Union[str, Any]=32 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: str=True , UpperCamelCase_: int="relu6" , UpperCamelCase_: str=1_280 , UpperCamelCase_: Union[str, Any]=0.1 , UpperCamelCase_: Tuple=0.02 , UpperCamelCase_: Dict=True , UpperCamelCase_: int=True , UpperCamelCase_: Union[str, Any]=10 , UpperCamelCase_: Optional[Any]=None , ) -> str:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = depth_multiplier
lowercase__ = depth_divisible_by
lowercase__ = min_depth
lowercase__ = expand_ratio
lowercase__ = tf_padding
lowercase__ = output_stride
lowercase__ = first_layer_is_expansion
lowercase__ = finegrained_output
lowercase__ = hidden_act
lowercase__ = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
lowercase__ = classifier_dropout_prob
lowercase__ = use_labels
lowercase__ = is_training
lowercase__ = num_labels
lowercase__ = initializer_range
lowercase__ = scope
def lowerCamelCase_ ( self: Any ) -> Dict:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase__ = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase_ ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: List[str] ) -> Any:
"""simple docstring"""
lowercase__ = MobileNetVaModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCamelCase_ ( self: Any , UpperCamelCase_: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = MobileNetVaForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: str , UpperCamelCase_: Any , UpperCamelCase_: List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = MobileNetVaForSemanticSegmentation(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowercase__ = model(UpperCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowercase__ = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self: Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_lowercase : str = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_lowercase : Any = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowercase : Dict = False
_lowercase : Any = False
_lowercase : List[str] = False
_lowercase : int = False
def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = MobileNetVaModelTester(self )
lowercase__ = MobileNetVaConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' )
def lowerCamelCase_ ( self: Dict ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' )
def lowerCamelCase_ ( self: Any ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''MobileNetV2 does not output attentions''' )
def lowerCamelCase_ ( self: str ) -> Optional[Any]:
"""simple docstring"""
pass
def lowerCamelCase_ ( self: Any ) -> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase_ )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Dict ) -> Union[str, Any]:
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase_: List[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[str] ):
lowercase__ = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowercase__ = outputs.hidden_states
lowercase__ = 16
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase_ )
@slow
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = MobileNetVaModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None
)
@slow
def lowerCamelCase_ ( self: Optional[int] ) -> str:
"""simple docstring"""
lowercase__ = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(UpperCamelCase_ )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
lowercase__ = model(**UpperCamelCase_ )
# verify the logits
lowercase__ = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
lowercase__ = torch.tensor([0.2445, -1.1993, 0.1905] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) )
@slow
def lowerCamelCase_ ( self: Any ) -> List[Any]:
"""simple docstring"""
lowercase__ = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
lowercase__ = model.to(UpperCamelCase_ )
lowercase__ = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
lowercase__ = model(**UpperCamelCase_ )
lowercase__ = outputs.logits
# verify the logits
lowercase__ = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , UpperCamelCase_ )
lowercase__ = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=UpperCamelCase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 110 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Return (base ** exponent) % modulo_value via recursive squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation tower base^^height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
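# Quick sanity check for _modexpt (illustrative, not from the original script):
# _modexpt(2, 10, 1000) == 24, since 2**10 = 1024 and 1024 % 1000 = 24.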
| 110 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : str = logging.get_logger(__name__)
def a__ ( snake_case__ , snake_case__ ) -> int:
lowerCamelCase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm1.weight', F'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm1.bias', F'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.weight', F'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.bias', F'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm2.weight', F'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm2.bias', F'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.weight', F'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.bias', F'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc2.weight', F'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.mlp.fc2.bias', F'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def a__ ( snake_case__ , snake_case__ ) -> Union[str, Any]:
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
lowerCamelCase = state_dict.pop(F'encoder.deit.blocks.{i}.attn.qkv.weight' )
lowerCamelCase = in_proj_weight[
: encoder_config.hidden_size, :
]
lowerCamelCase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
lowerCamelCase = in_proj_weight[
-encoder_config.hidden_size :, :
]
def a__ ( snake_case__ , snake_case__ , snake_case__ ) -> List[str]:
lowerCamelCase = dct.pop(snake_case__ )
lowerCamelCase = val
def a__ ( snake_case__ ) -> Dict:
if "handwritten" in checkpoint_url:
lowerCamelCase = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
lowerCamelCase = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
lowerCamelCase = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def a__ ( snake_case__ , snake_case__ ) -> Optional[int]:
lowerCamelCase = ViTConfig(image_size=3_84 , qkv_bias=snake_case__ )
lowerCamelCase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
lowerCamelCase = 7_68
elif "large" in checkpoint_url:
# use ViT-large encoder
lowerCamelCase = 10_24
lowerCamelCase = 40_96
lowerCamelCase = 24
lowerCamelCase = 16
lowerCamelCase = 10_24
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
lowerCamelCase = False
lowerCamelCase = """relu"""
lowerCamelCase = 10_24
lowerCamelCase = True
lowerCamelCase = False
lowerCamelCase = False
# load HuggingFace model
lowerCamelCase = ViTModel(snake_case__ , add_pooling_layer=snake_case__ )
lowerCamelCase = TrOCRForCausalLM(snake_case__ )
lowerCamelCase = VisionEncoderDecoderModel(encoder=snake_case__ , decoder=snake_case__ )
model.eval()
# load state_dict of original model, rename some keys
lowerCamelCase = torch.hub.load_state_dict_from_url(snake_case__ , map_location="""cpu""" , check_hash=snake_case__ )["""model"""]
lowerCamelCase = create_rename_keys(snake_case__ , snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
read_in_q_k_v(snake_case__ , snake_case__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
lowerCamelCase = state_dict.pop(snake_case__ )
if key.startswith("""decoder""" ) and "output_projection" not in key:
lowerCamelCase = val
else:
lowerCamelCase = val
# load state dict
model.load_state_dict(snake_case__ )
# Check outputs on an image
lowerCamelCase = ViTImageProcessor(size=encoder_config.image_size )
lowerCamelCase = RobertaTokenizer.from_pretrained("""roberta-large""" )
lowerCamelCase = TrOCRProcessor(snake_case__ , snake_case__ )
lowerCamelCase = processor(images=prepare_img(snake_case__ ) , return_tensors="""pt""" ).pixel_values
# verify logits
lowerCamelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
lowerCamelCase = model(pixel_values=snake_case__ , decoder_input_ids=snake_case__ )
lowerCamelCase = outputs.logits
lowerCamelCase = torch.Size([1, 1, 5_02_65] )
if "trocr-base-handwritten" in checkpoint_url:
lowerCamelCase = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
lowerCamelCase = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
lowerCamelCase = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
lowerCamelCase = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , snake_case__ , atol=1E-3 ), "First elements of logits not as expected"
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
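# Example invocation (hedged: the script filename is assumed; the checkpoint URL
# is this script's documented default):
#   python convert_trocr_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten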
| 168 |
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase : Dict = 16
lowerCAmelCase : int = 32
def bamb(x):
    """Convert a byte count to (whole) megabytes."""
    return int(x / 2**20)
class __magic_name__ :
'''simple docstring'''
def __enter__( self ):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
return self
def __exit__( self , *_a ):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def a__ ( snake_case__ , snake_case__ = 16 , snake_case__ = "bert-base-cased" , snake_case__ = 3_20 , snake_case__ = 1_60 , ) -> List[str]:
lowerCamelCase = AutoTokenizer.from_pretrained(snake_case__ )
lowerCamelCase = load_dataset(
"""glue""" , """mrpc""" , split={"""train""": F'train[:{n_train}]', """validation""": F'validation[:{n_val}]'} )
def tokenize_function(snake_case__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCamelCase = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=snake_case__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(snake_case__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case__ , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" )
return tokenizer.pad(snake_case__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowerCamelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
lowerCamelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
def a__ ( snake_case__ , snake_case__ ) -> Any:
# Initialize accelerator
lowerCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase = config["""lr"""]
lowerCamelCase = int(config["""num_epochs"""] )
lowerCamelCase = int(config["""seed"""] )
lowerCamelCase = int(config["""batch_size"""] )
lowerCamelCase = args.model_name_or_path
set_seed(snake_case__ )
lowerCamelCase , lowerCamelCase = get_dataloaders(snake_case__ , snake_case__ , snake_case__ , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(snake_case__ , return_dict=snake_case__ )
# Instantiate optimizer
lowerCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCamelCase = optimizer_cls(params=model.parameters() , lr=snake_case__ )
if accelerator.state.deepspeed_plugin is not None:
lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
lowerCamelCase = 1
lowerCamelCase = (len(snake_case__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCamelCase = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , )
else:
lowerCamelCase = DummyScheduler(snake_case__ , total_num_steps=snake_case__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# We need to keep track of how many total steps we have iterated over
lowerCamelCase = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCamelCase = 0
# Now we train the model
lowerCamelCase = {}
for epoch in range(snake_case__ , snake_case__ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(snake_case__ ):
lowerCamelCase = model(**snake_case__ )
lowerCamelCase = outputs.loss
lowerCamelCase = loss / gradient_accumulation_steps
accelerator.backward(snake_case__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("""Memory before entering the train : {}""".format(bamb(tracemalloc.begin ) ) )
accelerator.print("""Memory consumed at the end of the train (end-begin): {}""".format(tracemalloc.used ) )
accelerator.print("""Peak Memory consumed during the train (max-begin): {}""".format(tracemalloc.peaked ) )
accelerator.print(
"""Total Peak Memory consumed during the train (max): {}""".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowerCamelCase = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F'epoch-{epoch}'] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """peak_memory_utilization.json""" ) , """w""" ) as f:
json.dump(snake_case__ , snake_case__ )
def a__ ( ) -> str:
lowerCamelCase = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=snake_case__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=snake_case__ , )
parser.add_argument(
"""--output_dir""" , type=snake_case__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--peak_memory_upper_bound""" , type=snake_case__ , default=snake_case__ , help="""The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.""" , )
parser.add_argument(
"""--n_train""" , type=snake_case__ , default=3_20 , help="""Number of training examples to use.""" , )
parser.add_argument(
"""--n_val""" , type=snake_case__ , default=1_60 , help="""Number of validation examples to use.""" , )
parser.add_argument(
"""--num_epochs""" , type=snake_case__ , default=1 , help="""Number of train epochs.""" , )
lowerCamelCase = parser.parse_args()
lowerCamelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
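# Example launch (hedged: the script filename and an existing accelerate/DeepSpeed
# config are assumed, neither is part of this snippet):
#   accelerate launch peak_memory_usage_test.py \
#       --model_name_or_path bert-base-cased --num_epochs 1 --n_train 320 --n_val 160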
| 168 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = ShapEPipeline
snake_case_ = ['''prompt''']
snake_case_ = ['''prompt''']
snake_case_ = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
snake_case_ = False
@property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
return 32
@property
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
return 32
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowercase_ ( self ) -> str:
'''simple docstring'''
return 8
@property
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowercase_ ( self ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(lowerCamelCase__ )
@property
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__lowerCamelCase = PriorTransformer(**lowerCamelCase__ )
return model
@property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__lowerCamelCase = ShapERenderer(**lowerCamelCase__ )
return model
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = self.dummy_prior
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = self.dummy_tokenizer
__lowerCamelCase = self.dummy_renderer
__lowerCamelCase = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=lowerCamelCase__ , clip_sample=lowerCamelCase__ , clip_sample_range=1.0 , )
__lowerCamelCase = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=0 ) -> Optional[Any]:
'''simple docstring'''
if str(lowerCamelCase__ ).startswith('mps' ):
__lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
else:
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
__lowerCamelCase = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def lowercase_ ( self ) -> int:
'''simple docstring'''
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = torch_device == 'cpu'
__lowerCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCamelCase__ , relax_max_difference=lowerCamelCase__ , )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**lowerCamelCase__ )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = 1
__lowerCamelCase = 2
__lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
for key in inputs.keys():
if key in self.batch_params:
__lowerCamelCase = batch_size * [inputs[key]]
__lowerCamelCase = pipe(**lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> int:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__lowerCamelCase = ShapEPipeline.from_pretrained('openai/shap-e' )
__lowerCamelCase = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
__lowerCamelCase = pipe(
'a shark' , generator=lowerCamelCase__ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
| 90 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"E": 1_2.7_0,
"T": 9.0_6,
"A": 8.1_7,
"O": 7.5_1,
"I": 6.9_7,
"N": 6.7_5,
"S": 6.3_3,
"H": 6.0_9,
"R": 5.9_9,
"D": 4.2_5,
"L": 4.0_3,
"C": 2.7_8,
"U": 2.7_6,
"M": 2.4_1,
"W": 2.3_6,
"F": 2.2_3,
"G": 2.0_2,
"Y": 1.9_7,
"P": 1.9_3,
"B": 1.2_9,
"V": 0.9_8,
"K": 0.7_7,
"J": 0.1_5,
"X": 0.1_5,
"Q": 0.1_0,
"Z": 0.0_7,
}
__A = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
__A = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    """Count occurrences of each uppercase letter in `message`."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    """Sort helper: return the first element of a (frequency, letters) pair."""
    return x[0]


def get_frequency_order(message: str) -> str:
    """Return the letters of the alphabet ordered by frequency in `message`."""
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    """Score (0-12) how well `message`'s letter frequencies match English."""
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
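# Interpretation note (hedged): english_freq_match_score ranges 0..12; readable
# English text tends to score high because its most and least frequent letters
# line up with ETAOIN, which is what makes the score useful for picking the
# correct decryption out of a set of candidates.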
| 90 | 1 |
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def __lowerCamelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str]=False ) -> int:
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
if not is_sharded:
snake_case = os.path.abspath(__lowerCAmelCase )
logger.info(F'''Loading PyTorch weights from {pt_path}''' )
snake_case = torch.load(__lowerCAmelCase , map_location="""cpu""" )
logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
snake_case = convert_pytorch_state_dict_to_flax(__lowerCAmelCase , __lowerCAmelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
snake_case = convert_pytorch_sharded_state_dict_to_flax(__lowerCAmelCase , __lowerCAmelCase )
return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PyTorch weight names to Flax names and reshape the tensor if necessary."""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Check whether `key` or `(model_prefix,) + key` is in random_flax_state_dict."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
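# Shape note for the conv branch above (illustrative values): a PyTorch conv
# weight of shape (out_ch, in_ch, kh, kw) = (8, 3, 3, 3) becomes a Flax kernel
# of shape (kh, kw, in_ch, out_ch) = (3, 3, 3, 8) after transpose(2, 3, 1, 0).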
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(pt_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(pt_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object.")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
| 3 |
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate a polynomial at ``x0`` using Neville's iterated
    interpolation over the points ``(x_points[i], y_points[i])``.
    Returns the interpolated value and the full table of intermediate values.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
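

# Illustrative check (assumed example, not from the original file): the points
# (1, 1), (2, 4), (3, 9), (4, 16) lie on y = x**2, so evaluating at 2.5 gives
#     neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 2.5)[0] == 6.25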
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=self.is_encoder_decoder,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size

                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class OPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids

            generated_ids = model.generate(input_ids, max_length=10)

            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids

            generated_ids = model.generate(input_ids, max_length=10)

            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 293 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__A = TypeVar("""KEY""")
__A = TypeVar("""VAL""")
@dataclass(frozen=a , slots=a )
class _lowerCAmelCase ( Generic[KEY, VAL] ):
"""simple docstring"""
__magic_name__ :KEY
__magic_name__ :VAL
class _lowerCAmelCase ( _Item ):
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __bool__( self ):
'''simple docstring'''
return False
__A = _DeletedItem()
class _lowerCAmelCase ( MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase = 8 , __UpperCAmelCase = 0.75 ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = initial_block_size
lowerCAmelCase__ :list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCAmelCase__ :Tuple = capacity_factor
lowerCAmelCase__ :str = 0
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
return hash(__UpperCAmelCase ) % len(self._buckets )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Any = self._buckets[ind]
if not stored:
lowerCAmelCase__ :Dict = _Item(__UpperCAmelCase , __UpperCAmelCase )
self._len += 1
return True
elif stored.key == key:
lowerCAmelCase__ :Optional[Any] = _Item(__UpperCAmelCase , __UpperCAmelCase )
return True
else:
return False
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCAmelCase__ :Optional[Any] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self._buckets
lowerCAmelCase__ :Tuple = [None] * new_size
lowerCAmelCase__ :List[Any] = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def snake_case ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def snake_case ( self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = self._get_bucket_index(__UpperCAmelCase )
for _ in range(len(self._buckets ) ):
yield ind
lowerCAmelCase__ :Tuple = self._get_next_ind(__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
if self._try_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
break
def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(__UpperCAmelCase , __UpperCAmelCase )
def __delitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
lowerCAmelCase__ :int = self._buckets[ind]
if item is None:
raise KeyError(__UpperCAmelCase )
if item is _deleted:
continue
if item.key == key:
lowerCAmelCase__ :List[str] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , __UpperCAmelCase ):
'''simple docstring'''
for ind in self._iterate_buckets(__UpperCAmelCase ):
lowerCAmelCase__ :str = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__UpperCAmelCase )
def __len__( self ):
'''simple docstring'''
return self._len
def __iter__( self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = ' ,'.join(
F"{item.key}: {item.val}" for item in self._buckets if item )
return F"HashMap({val_string})"
| 293 | 1 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding

        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding

        elif text is not None or query_images is not None:
            return encoding

        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 338 |
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 338 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 240 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
snake_case : Dict = logging.get_logger(__name__)
snake_case : Dict = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case : List[Any] = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
snake_case : int = {
'''distilbert-base-uncased''': 5_12,
'''distilbert-base-uncased-distilled-squad''': 5_12,
'''distilbert-base-cased''': 5_12,
'''distilbert-base-cased-distilled-squad''': 5_12,
'''distilbert-base-german-cased''': 5_12,
'''distilbert-base-multilingual-cased''': 5_12,
}
snake_case : Union[str, Any] = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
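
    # Illustrative shape of the output (example only): for a pair of sequences,
    # [CLS] A A A [SEP] B B [SEP] -> token_type_ids [0, 0, 0, 0, 0, 1, 1, 1].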
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 240 | 1 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm1.weight", F"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm1.bias", F"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.weight", F"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.bias", F"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm2.weight", F"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm2.bias", F"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.weight", F"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.bias", F"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc2.weight", F"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.mlp.fc2.bias", F"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
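

# For example, with i = 0 the first appended entry maps
#   "encoder.deit.blocks.0.norm1.weight" -> "encoder.encoder.layer.0.layernorm_before.weight"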
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
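

# E.g. with hidden_size = 768 the fused qkv weight has shape (2304, 768) and is
# split into three (768, 768) matrices for query, key and value.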
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ViTConfig(image_size=384 , qkv_bias=_lowerCamelCase )
_lowerCAmelCase : List[Any] = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
_lowerCAmelCase : Tuple = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
_lowerCAmelCase : Dict = 1_024
_lowerCAmelCase : Optional[int] = 4_096
_lowerCAmelCase : Union[str, Any] = 24
_lowerCAmelCase : int = 16
_lowerCAmelCase : int = 1_024
else:
raise ValueError("Should either find 'base' or 'large' in checkpoint URL" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
_lowerCAmelCase : Union[str, Any] = False
_lowerCAmelCase : Any = "relu"
_lowerCAmelCase : int = 1_024
_lowerCAmelCase : Any = True
_lowerCAmelCase : Dict = False
_lowerCAmelCase : List[Any] = False
# load HuggingFace model
_lowerCAmelCase : List[Any] = ViTModel(_lowerCamelCase , add_pooling_layer=_lowerCamelCase )
_lowerCAmelCase : str = TrOCRForCausalLM(_lowerCamelCase )
_lowerCAmelCase : Any = VisionEncoderDecoderModel(encoder=_lowerCamelCase , decoder=_lowerCamelCase )
model.eval()
# load state_dict of original model, rename some keys
_lowerCAmelCase : Dict = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu" , check_hash=_lowerCamelCase )["model"]
_lowerCAmelCase : Any = create_rename_keys(_lowerCamelCase , _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
_lowerCAmelCase : Tuple = state_dict.pop(_lowerCamelCase )
        if key.startswith("decoder" ) and "output_projection" not in key:
            _lowerCAmelCase : str = val  # re-stored under the "decoder.model." prefix in the original script
        else:
            _lowerCAmelCase : Optional[int] = val  # kept under its original key
# load state dict
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image
_lowerCAmelCase : List[str] = ViTImageProcessor(size=encoder_config.image_size )
_lowerCAmelCase : List[str] = RobertaTokenizer.from_pretrained("roberta-large" )
_lowerCAmelCase : int = TrOCRProcessor(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : str = processor(images=prepare_img(_lowerCamelCase ) , return_tensors="pt" ).pixel_values
# verify logits
_lowerCAmelCase : Any = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
_lowerCAmelCase : Optional[int] = model(pixel_values=_lowerCamelCase , decoder_input_ids=_lowerCamelCase )
_lowerCAmelCase : List[Any] = outputs.logits
_lowerCAmelCase : Tuple = torch.Size([1, 1, 50_265] )
if "trocr-base-handwritten" in checkpoint_url:
_lowerCAmelCase : Optional[Any] = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
_lowerCAmelCase : int = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
_lowerCAmelCase : List[Any] = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
_lowerCAmelCase : str = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , _lowerCamelCase , atol=1e-3 ), "First elements of logits not as expected"
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
_snake_case = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
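# Illustrative invocation (the checkpoint URL is the argparse default above; the
# script name and output path are made up for the example):
#     python convert_trocr_checkpoint.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#         --pytorch_dump_folder_path ./trocr-base-handwritten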
| 300 |
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def A ( _lowerCamelCase = "laptop" ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = F"https://www.amazon.in/laptop/s?k={product}"
_lowerCAmelCase : Dict = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
_lowerCAmelCase : Optional[int] = BeautifulSoup(requests.get(_lowerCamelCase , headers=_lowerCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
_lowerCAmelCase : int = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
            _lowerCAmelCase : Any = item.h2.text
            _lowerCAmelCase : List[str] = "https://www.amazon.in/" + item.h2.a["href"]
_lowerCAmelCase : Any = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
_lowerCAmelCase : List[str] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
_lowerCAmelCase : str = "Not available"
try:
_lowerCAmelCase : Optional[Any] = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
_lowerCAmelCase : Optional[Any] = ""
try:
_lowerCAmelCase : int = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
_lowerCAmelCase : Optional[Any] = float("nan" )
except AttributeError:
pass
_lowerCAmelCase : Any = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCAmelCase : List[str] = " "
_lowerCAmelCase : Tuple = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
_snake_case = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
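# The discount computed above is plain percentage-off-MRP. A self-contained
# restatement of that arithmetic (prices illustrative):
def _discount_percent(mrp: float, price: float) -> float:
    return (mrp - price) / mrp * 100

assert _discount_percent(1000.0, 750.0) == 25.0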
| 300 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> int:
'''simple docstring'''
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if len(__UpperCAmelCase ) == 0:
raise ValueError('''Scores cannot be empty''' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1, node_index * 2, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ), minimax(depth + 1, node_index * 2 + 1, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ), )
return min(
minimax(depth + 1, node_index * 2, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ), minimax(depth + 1, node_index * 2 + 1, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ), )
def __magic_name__ ( ) -> None:
'''simple docstring'''
snake_case_ = [90, 23, 6, 33, 21, 65, 123, 3_4423]
snake_case_ = math.log(len(__UpperCAmelCase ), 2 )
print('''Optimal value : ''', end='''''' )
print(minimax(0, 0, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
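# Worked trace for the demo scores in main() (8 leaves, so height = log2(8) = 3;
# MAX moves at the root, and levels alternate MAX/MIN/MAX down to the leaves):
#     depth 2 (MAX over leaf pairs): 90, 33, 65, 34423
#     depth 1 (MIN):                 min(90, 33) = 33,  min(65, 34423) = 65
#     depth 0 (MAX, the root):       max(33, 65) = 65
assert max(min(max(90, 23), max(6, 33)), min(max(21, 65), max(123, 34_423))) == 65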
| 56 |
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> str:
'''simple docstring'''
assert isinstance(__UpperCAmelCase, __UpperCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> int:
'''simple docstring'''
snake_case_ = tmp_path / '''cache'''
snake_case_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ = SqlDatasetReader(
'''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase, keep_in_memory=__UpperCAmelCase ).read()
_check_sql_dataset(__UpperCAmelCase, __UpperCAmelCase )
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
snake_case_ = tmp_path / '''cache'''
snake_case_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
snake_case_ = features.copy() if features else default_expected_features
snake_case_ = (
Features({feature: Value(__UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, features=__UpperCAmelCase, cache_dir=__UpperCAmelCase ).read()
_check_sql_dataset(__UpperCAmelCase, __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
    with contextlib.closing(sqlite3.connect(__UpperCAmelCase ) ) as con:
snake_case_ = con.cursor()
cur.execute('''SELECT * FROM dataset''' )
for row in cur:
yield row
@require_sqlalchemy
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
snake_case_ = tmp_path / '''cache'''
snake_case_ = os.path.join(__UpperCAmelCase, '''tmp.sql''' )
snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase ).read()
SqlDatasetWriter(__UpperCAmelCase, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=1 ).write()
snake_case_ = iter_sql_file(__UpperCAmelCase )
snake_case_ = iter_sql_file(__UpperCAmelCase )
    for row1, row2 in zip(__UpperCAmelCase, __UpperCAmelCase ):
        assert row1 == row2
@require_sqlalchemy
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> Any:
'''simple docstring'''
snake_case_ = tmp_path / '''cache'''
snake_case_ = os.path.join(__UpperCAmelCase, '''tmp.sql''' )
snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase ).read()
SqlDatasetWriter(__UpperCAmelCase, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=2 ).write()
snake_case_ = iter_sql_file(__UpperCAmelCase )
snake_case_ = iter_sql_file(__UpperCAmelCase )
    for row1, row2 in zip(__UpperCAmelCase, __UpperCAmelCase ):
        assert row1 == row2
@require_sqlalchemy
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
snake_case_ = tmp_path / '''cache'''
snake_case_ = os.path.join(__UpperCAmelCase, '''tmp.sql''' )
snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase ).read()
with pytest.raises(__UpperCAmelCase ):
SqlDatasetWriter(__UpperCAmelCase, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=0 ).write()
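# These tests assume `sqlite_path` points at a SQLite file holding a table
# literally named "dataset" with columns col_1/col_2/col_3. A minimal sketch of
# building such a fixture by hand (values illustrative):
def _make_sqlite_fixture(path):
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
        cur.execute("INSERT INTO dataset VALUES ('a', 1, 1.0)")
        con.commit()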
| 56 | 1 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
_lowerCamelCase : str = logging.get_logger(__name__)
class lowercase ( a ):
def __snake_case( self : List[str] , _UpperCamelCase : Optional[Any] ) -> List[Any]:
'''simple docstring'''
if isinstance(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = [label.strip() for label in labels.split("," ) if label.strip()]
return labels
def __call__( self : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Dict ) -> List[str]:
'''simple docstring'''
if len(_UpperCamelCase ) == 0 or len(_UpperCamelCase ) == 0:
raise ValueError("You must include at least one label and at least one sequence." )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
"Make sure the passed template includes formatting syntax such as {{}} where the label should go."
).format(_UpperCamelCase ) )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = [sequences]
SCREAMING_SNAKE_CASE = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(_UpperCamelCase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(a )
class lowercase ( a ):
def __init__( self : Union[str, Any] , _UpperCamelCase : List[str]=ZeroShotClassificationArgumentHandler() , *_UpperCamelCase : str , **_UpperCamelCase : Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = args_parser
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
if self.entailment_id == -1:
logger.warning(
"Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
"-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
@property
def __snake_case( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
        for label, ind in self.model.config.label2id.items():
if label.lower().startswith("entail" ):
return ind
return -1
def __snake_case( self : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict=True , _UpperCamelCase : Dict=True , _UpperCamelCase : Optional[int]=TruncationStrategy.ONLY_FIRST , **_UpperCamelCase : Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
" `pad_token=eos_token`" )
SCREAMING_SNAKE_CASE = self.tokenizer.eos_token
try:
SCREAMING_SNAKE_CASE = self.tokenizer(
_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_tensors=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , )
except Exception as e:
if "too short" in str(_UpperCamelCase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
SCREAMING_SNAKE_CASE = self.tokenizer(
_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_tensors=_UpperCamelCase , padding=_UpperCamelCase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def __snake_case( self : str , **_UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
if kwargs.get("multi_class" , _UpperCamelCase ) is not None:
SCREAMING_SNAKE_CASE = kwargs["multi_class"]
logger.warning(
"The `multi_class` argument has been deprecated and renamed to `multi_label`. "
"`multi_class` will be removed in a future version of Transformers." )
SCREAMING_SNAKE_CASE = {}
if "candidate_labels" in kwargs:
SCREAMING_SNAKE_CASE = self._args_parser._parse_labels(kwargs["candidate_labels"] )
if "hypothesis_template" in kwargs:
SCREAMING_SNAKE_CASE = kwargs["hypothesis_template"]
SCREAMING_SNAKE_CASE = {}
if "multi_label" in kwargs:
SCREAMING_SNAKE_CASE = kwargs["multi_label"]
return preprocess_params, {}, postprocess_params
def __call__( self : List[Any] , _UpperCamelCase : Union[str, List[str]] , *_UpperCamelCase : List[str] , **_UpperCamelCase : int , ) -> int:
'''simple docstring'''
if len(_UpperCamelCase ) == 0:
pass
elif len(_UpperCamelCase ) == 1 and "candidate_labels" not in kwargs:
SCREAMING_SNAKE_CASE = args[0]
else:
raise ValueError(F"Unable to understand extra arguments {args}" )
return super().__call__(_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : int , _UpperCamelCase : Any , _UpperCamelCase : List[str]=None , _UpperCamelCase : str="This example is {}." ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._args_parser(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
for i, (candidate_label, sequence_pair) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ):
SCREAMING_SNAKE_CASE = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(_UpperCamelCase ) - 1,
**model_input,
}
def __snake_case( self : List[Any] , _UpperCamelCase : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = inputs["candidate_label"]
SCREAMING_SNAKE_CASE = inputs["sequence"]
SCREAMING_SNAKE_CASE = {k: inputs[k] for k in self.tokenizer.model_input_names}
SCREAMING_SNAKE_CASE = self.model(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = {
"candidate_label": candidate_label,
"sequence": sequence,
"is_last": inputs["is_last"],
**outputs,
}
return model_outputs
def __snake_case( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : Any=False ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [outputs["candidate_label"] for outputs in model_outputs]
SCREAMING_SNAKE_CASE = [outputs["sequence"] for outputs in model_outputs]
SCREAMING_SNAKE_CASE = np.concatenate([output["logits"].numpy() for output in model_outputs] )
SCREAMING_SNAKE_CASE = logits.shape[0]
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = N // n
SCREAMING_SNAKE_CASE = logits.reshape((num_sequences, n, -1) )
if multi_label or len(_UpperCamelCase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
SCREAMING_SNAKE_CASE = self.entailment_id
SCREAMING_SNAKE_CASE = -1 if entailment_id == 0 else 0
SCREAMING_SNAKE_CASE = reshaped_outputs[..., [contradiction_id, entailment_id]]
SCREAMING_SNAKE_CASE = np.exp(_UpperCamelCase ) / np.exp(_UpperCamelCase ).sum(-1 , keepdims=_UpperCamelCase )
SCREAMING_SNAKE_CASE = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
SCREAMING_SNAKE_CASE = reshaped_outputs[..., self.entailment_id]
SCREAMING_SNAKE_CASE = np.exp(_UpperCamelCase ) / np.exp(_UpperCamelCase ).sum(-1 , keepdims=_UpperCamelCase )
SCREAMING_SNAKE_CASE = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
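# Usage sketch for the pipeline above (the checkpoint is illustrative; any NLI
# model whose config maps an "entail*" label works). The returned dict carries
# the "sequence"/"labels"/"scores" keys built in the postprocess step:
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    result = classifier(
        "one day I will see the world",
        candidate_labels=["travel", "cooking", "dancing"],
    )
    print(result["labels"][0], result["scores"][0])  # best label first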
| 358 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def __lowerCamelCase (UpperCAmelCase__ : str , UpperCAmelCase__ : Dict ):
try:
with open(UpperCAmelCase__ , "rb" ) as flax_state_f:
SCREAMING_SNAKE_CASE = from_bytes(UpperCAmelCase__ , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(UpperCAmelCase__ ) as f:
if f.read().startswith("version" ):
raise OSError(
"You seem to have cloned a repository without having git-lfs installed. Please"
" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
" folder you cloned." )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F"Unable to convert {model_file} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(UpperCAmelCase__ , UpperCAmelCase__ )
def __lowerCamelCase (UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
    SCREAMING_SNAKE_CASE = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , UpperCAmelCase__ ) ).values()
if any(UpperCAmelCase__ ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
        SCREAMING_SNAKE_CASE = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = ""
SCREAMING_SNAKE_CASE = flatten_dict(UpperCAmelCase__ , sep="." )
SCREAMING_SNAKE_CASE = pt_model.state_dict()
# keep track of unexpected & missing keys
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
SCREAMING_SNAKE_CASE = flax_key_tuple.split("." )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
SCREAMING_SNAKE_CASE = flax_key_tuple_array[:-1] + ["weight"]
SCREAMING_SNAKE_CASE = jnp.transpose(UpperCAmelCase__ , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
SCREAMING_SNAKE_CASE = flax_key_tuple_array[:-1] + ["weight"]
SCREAMING_SNAKE_CASE = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
SCREAMING_SNAKE_CASE = flax_key_tuple_array[:-1] + ["weight"]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = (
flax_key_tuple_string.replace("_0" , ".0" )
.replace("_1" , ".1" )
.replace("_2" , ".2" )
.replace("_3" , ".3" )
.replace("_4" , ".4" )
.replace("_5" , ".5" )
.replace("_6" , ".6" )
.replace("_7" , ".7" )
.replace("_8" , ".8" )
.replace("_9" , ".9" )
)
SCREAMING_SNAKE_CASE = ".".join(UpperCAmelCase__ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
SCREAMING_SNAKE_CASE = np.asarray(UpperCAmelCase__ ) if not isinstance(UpperCAmelCase__ , np.ndarray ) else flax_tensor
SCREAMING_SNAKE_CASE = torch.from_numpy(UpperCAmelCase__ )
# remove from missing keys
missing_keys.remove(UpperCAmelCase__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(UpperCAmelCase__ )
pt_model.load_state_dict(UpperCAmelCase__ )
# re-transform missing_keys to list
SCREAMING_SNAKE_CASE = list(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
if len(UpperCAmelCase__ ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
" use it for predictions and inference." )
return pt_model
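# Why the bare "kernel" branch above transposes: Flax stores Dense kernels as
# (in_features, out_features) while torch.nn.Linear.weight is (out_features,
# in_features). A tiny self-contained illustration of that layout convention
# (shapes illustrative):
_flax_kernel = np.zeros((768, 3_072))  # (in, out), Flax layout
_pt_weight = _flax_kernel.T  # (out, in), PyTorch layout
assert _pt_weight.shape == (3_072, 768)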
| 206 | 0 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
a_ : Union[str, Any] = logging.getLogger()
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = '\n'.join(_UpperCAmelCase)
Path(_UpperCAmelCase).open('w').writelines(_UpperCAmelCase)
a_ : List[Any] = 'patrickvonplaten/t5-tiny-random'
a_ : Union[str, Any] = 'sshleifer/bart-tiny-random'
a_ : Union[str, Any] = 'sshleifer/tiny-mbart'
a_ : Union[str, Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _snake_case ( A__ ):
def SCREAMING_SNAKE_CASE__ ( self , a) -> Tuple:
SCREAMING_SNAKE_CASE = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
SCREAMING_SNAKE_CASE = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
SCREAMING_SNAKE_CASE = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(a , a)
SCREAMING_SNAKE_CASE = str(Path(self.get_auto_remove_tmp_dir()) / 'scores.json')
SCREAMING_SNAKE_CASE = 'translation_en_to_de' if model == T5_TINY else 'summarization'
SCREAMING_SNAKE_CASE = f'''
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
'''.split()
with patch.object(a , 'argv' , a):
run_generate()
assert Path(a).exists()
# os.remove(Path(output_file_name))
def SCREAMING_SNAKE_CASE__ ( self) -> str:
self.run_eval_tester(a)
@parameterized.expand([BART_TINY, MBART_TINY])
@slow
def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[int]:
self.run_eval_tester(a)
@parameterized.expand([T5_TINY, MBART_TINY])
@slow
def SCREAMING_SNAKE_CASE__ ( self , a) -> Any:
SCREAMING_SNAKE_CASE = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
SCREAMING_SNAKE_CASE = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
SCREAMING_SNAKE_CASE = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
SCREAMING_SNAKE_CASE = Path(self.get_auto_remove_tmp_dir())
SCREAMING_SNAKE_CASE = str(tmp_dir / 'scores.json')
SCREAMING_SNAKE_CASE = str(tmp_dir / 'val.target')
_dump_articles(a , text['en'])
_dump_articles(a , text['de'])
SCREAMING_SNAKE_CASE = 'translation_en_to_de' if model == T5_TINY else 'summarization'
SCREAMING_SNAKE_CASE = f'''
run_eval_search.py
{model}
{str(a)}
{str(a)}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
'''.split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'])
with patch.object(a , 'argv' , a):
with CaptureStdout() as cs:
run_search()
SCREAMING_SNAKE_CASE = [' num_beams | length_penalty', model, 'Best score args']
SCREAMING_SNAKE_CASE = ['Info']
if "translation" in task:
expected_strings.append('bleu')
else:
expected_strings.extend(a)
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(a).exists()
os.remove(Path(a))
| 137 |
import os
from datetime import datetime as dt
from github import Github
a_ : Tuple = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = Github(os.environ['GITHUB_TOKEN'])
SCREAMING_SNAKE_CASE = g.get_repo('huggingface/diffusers')
SCREAMING_SNAKE_CASE = repo.get_issues(state='open')
for issue in open_issues:
        SCREAMING_SNAKE_CASE = sorted(issue.get_comments() , key=lambda i: i.created_at , reverse=True)
SCREAMING_SNAKE_CASE = comments[0] if len(_UpperCAmelCase) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed')
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open')
issue.remove_from_labels('stale')
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.')
issue.add_to_labels('stale')
if __name__ == "__main__":
main()
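# Timeline implied by the thresholds above (a hedged reading, not extra logic):
# an issue must be at least 30 days old; after more than 23 days of inactivity
# the bot posts the stale notice and labels it; 7 more silent days after that
# notice and the issue is closed; any comment from a non-bot account removes
# the label and reopens it.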
| 137 | 1 |
__A ='''Alexander Joslin'''
import operator as op
from .stack import Stack
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
lowerCamelCase_ = Stack()
lowerCamelCase_ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(lowerCamelCase__ ) )
elif i in operators:
# RULE 2
operator_stack.push(lowerCamelCase__ )
elif i == ")":
# RULE 4
lowerCamelCase_ = operator_stack.peek()
operator_stack.pop()
lowerCamelCase_ = operand_stack.peek()
operand_stack.pop()
lowerCamelCase_ = operand_stack.peek()
operand_stack.pop()
lowerCamelCase_ = operators[opr](lowerCamelCase__ , lowerCamelCase__ )
operand_stack.push(lowerCamelCase__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
__A ='''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
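# Worked trace for the sample equation above; each ")" applies one operator to
# the two most recent operands:
#     (4 * 2) -> 8     operand stack: [5, 8]
#     (2 + 3) -> 5     operand stack: [5, 8, 5]
#     8 * 5   -> 40    operand stack: [5, 40]
#     5 + 40  -> 45    operand stack: [45]
assert (5 + ((4 * 2) * (2 + 3))) == 45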
| 363 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
__A =logging.getLogger(__name__)
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Train language if it is different from the evaluation language.'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
lowerCAmelCase__ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def lowerCamelCase_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_xnli" , lowerCamelCase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase_ = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase__ )
datasets.utils.logging.set_verbosity(lowerCamelCase__ )
transformers.utils.logging.set_verbosity(lowerCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
lowerCamelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
lowerCamelCase_ = load_dataset(
"xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowerCamelCase_ = load_dataset(
"xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = train_dataset.features["label"].names
if training_args.do_eval:
lowerCamelCase_ = load_dataset(
"xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = eval_dataset.features["label"].names
if training_args.do_predict:
lowerCamelCase_ = load_dataset(
"xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = predict_dataset.features["label"].names
# Labels
lowerCamelCase_ = len(lowerCamelCase__ )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase__ , id2label={str(lowerCamelCase__ ): label for i, label in enumerate(lowerCamelCase__ )} , label2id={label: i for i, label in enumerate(lowerCamelCase__ )} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
lowerCamelCase_ = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCamelCase_ = False
def preprocess_function(lowerCamelCase__ ):
# Tokenize the texts
return tokenizer(
examples["premise"] , examples["hypothesis"] , padding=lowerCamelCase__ , max_length=data_args.max_seq_length , truncation=lowerCamelCase__ , )
if training_args.do_train:
if data_args.max_train_samples is not None:
lowerCamelCase_ = min(len(lowerCamelCase__ ) , data_args.max_train_samples )
lowerCamelCase_ = train_dataset.select(range(lowerCamelCase__ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
lowerCamelCase_ = train_dataset.map(
lowerCamelCase__ , batched=lowerCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , )
# Log a few random samples from the training set:
for index in random.sample(range(len(lowerCamelCase__ ) ) , 3 ):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowerCamelCase_ = min(len(lowerCamelCase__ ) , data_args.max_eval_samples )
lowerCamelCase_ = eval_dataset.select(range(lowerCamelCase__ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
lowerCamelCase_ = eval_dataset.map(
lowerCamelCase__ , batched=lowerCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
lowerCamelCase_ = min(len(lowerCamelCase__ ) , data_args.max_predict_samples )
lowerCamelCase_ = predict_dataset.select(range(lowerCamelCase__ ) )
with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
lowerCamelCase_ = predict_dataset.map(
lowerCamelCase__ , batched=lowerCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , )
# Get the metric function
lowerCamelCase_ = evaluate.load("xnli" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCamelCase__ ):
lowerCamelCase_ = p.predictions[0] if isinstance(p.predictions , lowerCamelCase__ ) else p.predictions
lowerCamelCase_ = np.argmax(lowerCamelCase__ , axis=1 )
return metric.compute(predictions=lowerCamelCase__ , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCamelCase_ = default_data_collator
    elif training_args.fp16:
lowerCamelCase_ = DataCollatorWithPadding(lowerCamelCase__ , pad_to_multiple_of=8 )
else:
lowerCamelCase_ = None
# Initialize our Trainer
lowerCamelCase_ = Trainer(
model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCamelCase__ , tokenizer=lowerCamelCase__ , data_collator=lowerCamelCase__ , )
# Training
if training_args.do_train:
lowerCamelCase_ = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ = last_checkpoint
lowerCamelCase_ = trainer.train(resume_from_checkpoint=lowerCamelCase__ )
lowerCamelCase_ = train_result.metrics
lowerCamelCase_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase__ )
)
lowerCamelCase_ = min(lowerCamelCase__ , len(lowerCamelCase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , lowerCamelCase__ )
trainer.save_metrics("train" , lowerCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
lowerCamelCase_ = trainer.evaluate(eval_dataset=lowerCamelCase__ )
lowerCamelCase_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase__ )
lowerCamelCase_ = min(lowerCamelCase__ , len(lowerCamelCase__ ) )
trainer.log_metrics("eval" , lowerCamelCase__ )
trainer.save_metrics("eval" , lowerCamelCase__ )
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***" )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = trainer.predict(lowerCamelCase__ , metric_key_prefix="predict" )
lowerCamelCase_ = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(lowerCamelCase__ )
)
lowerCamelCase_ = min(lowerCamelCase__ , len(lowerCamelCase__ ) )
trainer.log_metrics("predict" , lowerCamelCase__ )
trainer.save_metrics("predict" , lowerCamelCase__ )
lowerCamelCase_ = np.argmax(lowerCamelCase__ , axis=1 )
lowerCamelCase_ = os.path.join(training_args.output_dir , "predictions.txt" )
if trainer.is_world_process_zero():
with open(lowerCamelCase__ , "w" ) as writer:
writer.write("index\tprediction\n" )
for index, item in enumerate(lowerCamelCase__ ):
lowerCamelCase_ = label_list[item]
writer.write(F'{index}\t{item}\n' )
if __name__ == "__main__":
main()
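# Illustrative launch for this script (the checkpoint name is just an example;
# "xnli" and the language codes are the real dataset arguments used above):
#     python run_xnli.py \
#         --model_name_or_path bert-base-multilingual-cased \
#         --language de --train_language en \
#         --do_train --do_eval \
#         --output_dir /tmp/debug_xnli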
| 47 | 0 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class __snake_case ( UpperCamelCase_ ):
def __init__( self : Any , A_ : Dict="" , A_ : Tuple="train"):
assert os.path.isdir(A_)
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : Dict = os.listdir(A_)
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
lowerCAmelCase_ : str = os.path.join(A_ , A_)
if not os.path.isfile(A_):
continue
self.documents.append(A_)
def __len__( self : Optional[int]):
return len(self.documents)
def __getitem__( self : Optional[int] , A_ : Tuple):
lowerCAmelCase_ : List[str] = self.documents[idx]
lowerCAmelCase_ : Optional[Any] = document_path.split('''/''')[-1]
with open(A_ , encoding='''utf-8''') as source:
lowerCAmelCase_ : Any = source.read()
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = process_story(A_)
return document_name, story_lines, summary_lines
def UpperCamelCase( __UpperCamelCase : Union[str, Any] ):
lowerCAmelCase_ : Tuple = list(filter(lambda __UpperCamelCase : len(__UpperCamelCase ) != 0 ,[line.strip() for line in raw_story.split('''\n''' )] ) )
# for some unknown reason some lines miss a period, add it
lowerCAmelCase_ : Union[str, Any] = [_add_missing_period(__UpperCamelCase ) for line in nonempty_lines]
# gather article lines
lowerCAmelCase_ : Any = []
lowerCAmelCase_ : Any = deque(__UpperCamelCase )
while True:
try:
lowerCAmelCase_ : Any = lines.popleft()
if element.startswith('''@highlight''' ):
break
story_lines.append(__UpperCamelCase )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
    lowerCAmelCase_ : str = list(filter(lambda t : not t.startswith('''@highlight''' ) ,__UpperCamelCase ) )
return story_lines, summary_lines
def UpperCamelCase( __UpperCamelCase : Union[str, Any] ):
    lowerCAmelCase_ : Any = ['''.''', '''!''', '''?''', '''...''', '''\'''', '''`''', '''"''', '''\u2019''', '''\u201d''', ''')''']
if line.startswith('''@highlight''' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def UpperCamelCase( __UpperCamelCase : Dict ,__UpperCamelCase : int ,__UpperCamelCase : Union[str, Any] ):
if len(__UpperCamelCase ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(__UpperCamelCase )) )
return sequence
def UpperCamelCase( __UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ):
lowerCAmelCase_ : Union[str, Any] = torch.ones_like(__UpperCamelCase )
lowerCAmelCase_ : Dict = sequence == pad_token_id
lowerCAmelCase_ : int = 0
return mask
def UpperCamelCase( __UpperCamelCase : Tuple ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ):
lowerCAmelCase_ : Dict = [tokenizer.encode(__UpperCamelCase ) for line in story_lines]
lowerCAmelCase_ : Optional[Any] = [token for sentence in story_lines_token_ids for token in sentence]
lowerCAmelCase_ : Tuple = [tokenizer.encode(__UpperCamelCase ) for line in summary_lines]
lowerCAmelCase_ : Optional[Any] = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def UpperCamelCase( __UpperCamelCase : List[str] ,__UpperCamelCase : Tuple ):
lowerCAmelCase_ : Union[str, Any] = []
for sequence in batch:
lowerCAmelCase_ : str = -1
lowerCAmelCase_ : List[str] = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(__UpperCamelCase )
return torch.tensor(__UpperCamelCase )
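# A small, self-contained illustration of the truncate-or-pad + mask pair above
# (token ids and pad id are made up): short sequences are padded to block_size
# and the mask zeroes out exactly the padded positions.
_example = [101, 7, 8, 9]
_padded = _example + [0] * (6 - len(_example))  # block_size=6, pad_token_id=0
assert _padded == [101, 7, 8, 9, 0, 0]
_mask = [0 if tok == 0 else 1 for tok in _padded]  # 1 = real token, 0 = padding
assert _mask == [1, 1, 1, 1, 0, 0]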
| 103 |
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def snake_case_ ( SCREAMING_SNAKE_CASE__ = 100_0000 , SCREAMING_SNAKE_CASE__ = 10 ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : defaultdict = defaultdict(SCREAMING_SNAKE_CASE__ )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_SCREAMING_SNAKE_CASE : int = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
_SCREAMING_SNAKE_CASE : List[str] = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(SCREAMING_SNAKE_CASE__ , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(F"{solution() = }")
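# Worked example of the lamina arithmetic above: an outer square of width 3 with
# a 1-wide hole uses 3 * 3 - 1 * 1 = 8 tiles, so count[8] gains one arrangement;
# the answer counts tile totals that can be formed in between 1 and 10 ways.
assert 3 * 3 - 1 * 1 == 8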
| 200 | 0 |
class _a :
def __init__( self: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Union[str, Any]=None , UpperCamelCase_: List[str]=None ) -> Optional[int]:
"""simple docstring"""
lowercase__ = data
lowercase__ = previous
lowercase__ = next_node
def __str__( self: List[str] ) -> str:
"""simple docstring"""
return f'{self.data}'
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
return self.data
def lowerCamelCase_ ( self: List[str] ) -> List[Any]:
"""simple docstring"""
return self.next
def lowerCamelCase_ ( self: str ) -> List[str]:
"""simple docstring"""
return self.previous
class _a :
def __init__( self: Dict , UpperCamelCase_: str ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = head
def __iter__( self: str ) -> Optional[int]:
"""simple docstring"""
return self
def lowerCamelCase_ ( self: List[str] ) -> Optional[int]:
"""simple docstring"""
if not self.current:
raise StopIteration
else:
lowercase__ = self.current.get_data()
lowercase__ = self.current.get_next()
return value
class _a :
def __init__( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowercase__ = None # First node in list
lowercase__ = None # Last node in list
def __str__( self: Dict ) -> int:
"""simple docstring"""
lowercase__ = self.head
lowercase__ = []
while current is not None:
nodes.append(current.get_data() )
lowercase__ = current.get_next()
return " ".join(str(UpperCamelCase_ ) for node in nodes )
def __contains__( self: Union[str, Any] , UpperCamelCase_: int ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.head
while current:
if current.get_data() == value:
return True
lowercase__ = current.get_next()
return False
def __iter__( self: Optional[Any] ) -> int:
"""simple docstring"""
return LinkedListIterator(self.head )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if self.head:
return self.head.get_data()
return None
def lowerCamelCase_ ( self: Optional[Any] ) -> Tuple:
"""simple docstring"""
if self.tail:
return self.tail.get_data()
return None
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: Node ) -> None:
"""simple docstring"""
if self.head is None:
lowercase__ = node
lowercase__ = node
else:
self.insert_before_node(self.head , UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Node ) -> None:
"""simple docstring"""
if self.head is None:
self.set_head(UpperCamelCase_ )
else:
self.insert_after_node(self.tail , UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: int ) -> None:
"""simple docstring"""
lowercase__ = Node(UpperCamelCase_ )
if self.head is None:
self.set_head(UpperCamelCase_ )
else:
self.set_tail(UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Node , UpperCamelCase_: Node ) -> None:
"""simple docstring"""
lowercase__ = node
lowercase__ = node.previous
if node.get_previous() is None:
lowercase__ = node_to_insert
else:
lowercase__ = node_to_insert
lowercase__ = node_to_insert
def lowerCamelCase_ ( self: str , UpperCamelCase_: Node , UpperCamelCase_: Node ) -> None:
"""simple docstring"""
lowercase__ = node
lowercase__ = node.next
if node.get_next() is None:
lowercase__ = node_to_insert
else:
lowercase__ = node_to_insert
lowercase__ = node_to_insert
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int ) -> None:
"""simple docstring"""
lowercase__ = 1
lowercase__ = Node(UpperCamelCase_ )
lowercase__ = self.head
while node:
if current_position == position:
self.insert_before_node(UpperCamelCase_ , UpperCamelCase_ )
return
current_position += 1
lowercase__ = node.next
self.insert_after_node(self.tail , UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: int ) -> Node:
"""simple docstring"""
lowercase__ = self.head
while node:
if node.get_data() == item:
return node
lowercase__ = node.get_next()
raise Exception('''Node not found''' )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: str ) -> List[str]:
"""simple docstring"""
if (node := self.get_node(UpperCamelCase_ )) is not None:
if node == self.head:
lowercase__ = self.head.get_next()
if node == self.tail:
lowercase__ = self.tail.get_previous()
self.remove_node_pointers(UpperCamelCase_ )
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Node ) -> None:
"""simple docstring"""
if node.get_next():
lowercase__ = node.previous
if node.get_previous():
lowercase__ = node.next
lowercase__ = None
lowercase__ = None
def lowerCamelCase_ ( self: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.head is None
def _a ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
lowerCAmelCase = logging.getLogger(__name__)
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = git.Repo(search_parent_directories=SCREAMING_SNAKE_CASE )
lowercase__ = {
'''repo_id''': str(SCREAMING_SNAKE_CASE ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
}
with open(os.path.join(SCREAMING_SNAKE_CASE , '''git_log.json''' ) , '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , indent=4 )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if params.n_gpu <= 0:
lowercase__ = 0
lowercase__ = -1
lowercase__ = True
lowercase__ = False
return
assert torch.cuda.is_available()
logger.info('''Initializing GPUs''' )
if params.n_gpu > 1:
assert params.local_rank != -1
lowercase__ = int(os.environ['''WORLD_SIZE'''] )
lowercase__ = int(os.environ['''N_GPU_NODE'''] )
lowercase__ = int(os.environ['''RANK'''] )
# number of nodes / node ID
lowercase__ = params.world_size // params.n_gpu_per_node
lowercase__ = params.global_rank // params.n_gpu_per_node
lowercase__ = True
assert params.n_nodes == int(os.environ['''N_NODES'''] )
assert params.node_id == int(os.environ['''NODE_RANK'''] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowercase__ = 1
lowercase__ = 0
lowercase__ = 0
lowercase__ = 0
lowercase__ = 1
lowercase__ = 1
lowercase__ = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowercase__ = params.node_id == 0 and params.local_rank == 0
lowercase__ = params.n_nodes > 1
# summary
lowercase__ = f'--- Global rank: {params.global_rank} - '
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 93 | 0 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing( search_prob , find_max : bool = True , max_x : float = math.inf , min_x : float = -math.inf , max_y : float = math.inf , min_y : float = -math.inf , visualization : bool = False , start_temperate : float = 100 , rate_of_decrease : float = 0.01 , threshold_temp : float = 1 , ) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
        plt.xlabel('Iterations' )
        plt.ylabel('Function values' )
        plt.show()
return best_state
if __name__ == "__main__":
    def test_fa ( x , y ):
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F'and 50 > y > - 5 found via simulated annealing: {local_min.score()}'
)
# starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_max = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F'and 50 > y > - 5 found via simulated annealing: {local_max.score()}'
)
    def test_fa ( x , y ):
return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
F'{local_min.score()}'
)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
        F'{local_max.score()}'
) | 282 |
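# The core of the routine above is the Metropolis acceptance rule: a
# worsening move of size `change` is still taken with probability
# e ** (change / temp). A stripped-down 1-D sketch of that rule; the
# quadratic objective and the cooling constants are illustrative, not from
# the original:
import math
import random


def anneal_1d(f, x, temp=100.0, cooling=0.01, threshold=1.0):
    """Minimize f over integer steps with the Metropolis acceptance rule."""
    best = x
    while temp >= threshold:
        candidate = x + random.choice([-1, 1])
        change = f(x) - f(candidate)  # positive when the candidate is better
        if change > 0 or random.random() < math.e ** (change / temp):
            x = candidate  # always accept improvements, sometimes regressions
        if f(x) < f(best):
            best = x
        temp -= temp * cooling  # geometric cooling schedule
    return best


print(anneal_1d(lambda x: (x - 7) ** 2, x=50))  # settles near 7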
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ):
        '''simple docstring'''
        size = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w )
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h )
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self )
@property
    def image_processor_dict( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase_ )
    def test_batch_feature( self ):
'''simple docstring'''
pass
    def test_call_pil( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
UpperCamelCase__ :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
UpperCamelCase__ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
UpperCamelCase__ :str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Dict = self.image_processor_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Optional[int] = json.loads(f.read() )
UpperCamelCase__ :Any = {'''image_id''': 39769, '''annotations''': target}
# encode them
UpperCamelCase__ :str = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
UpperCamelCase__ :List[Any] = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify orig_size
UpperCamelCase__ :Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
UpperCamelCase__ :Tuple = json.loads(f.read() )
UpperCamelCase__ :List[str] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
UpperCamelCase__ :Any = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessor(format='''coco_panoptic''' )
UpperCamelCase__ :Dict = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors='''pt''' )
# verify pixel values
UpperCamelCase__ :str = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) )
# verify area
UpperCamelCase__ :Tuple = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) )
# verify boxes
UpperCamelCase__ :Any = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ )
UpperCamelCase__ :List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) )
# verify image_id
UpperCamelCase__ :List[str] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) )
# verify is_crowd
UpperCamelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) )
# verify class_labels
UpperCamelCase__ :str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) )
# verify masks
UpperCamelCase__ :Optional[Any] = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCamelCase_ )
# verify orig_size
UpperCamelCase__ :List[str] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) )
# verify size
UpperCamelCase__ :List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) ) | 97 | 0 |
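# The get_expected_values helper above mirrors DETR-style resizing: the
# short side is scaled to `shortest_edge` while the aspect ratio is kept.
# A standalone sketch of that size computation (the `longest_edge` cap used
# by the real processor is omitted here for brevity):
def expected_size(width, height, shortest_edge=18):
    """Return (height, width) after scaling the short side to shortest_edge."""
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge


print(expected_size(400, 300))  # -> (18, 24)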
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFImgaImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components( self ):
'''simple docstring'''
return self._get_superresolution_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
__UpperCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
    def test_save_load_optional_components( self ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def test_save_load_float16( self ):
        '''simple docstring'''
        super().test_save_load_float16(expected_max_diff=1E-1 )
    def test_attention_slicing_forward_pass( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
    def test_save_load_local( self ):
'''simple docstring'''
self._test_save_load_local()
    def test_inference_batch_single_identical( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 263 |
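# get_dummy_inputs above builds a reproducible torch.Generator and falls
# back to seeding the global RNG on MPS, where per-device generators are
# not supported. The device-aware seeding pattern in isolation:
import torch


def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the default CPU generator
    return torch.Generator(device=device).manual_seed(seed)


gen = make_generator("cpu", seed=0)
print(torch.randn(2, generator=gen))  # identical on every run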
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
class DonutFeatureExtractor ( DonutImageProcessor ):
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 263 | 1 |
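# The class above is a thin deprecation shim: the old name survives as a
# subclass that warns and forwards everything to its replacement. The same
# pattern in isolation (OldName/NewName are placeholder classes):
import warnings


class NewName:
    def __init__(self, value=0):
        self.value = value


class OldName(NewName):
    """Deprecated alias kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        warnings.warn("OldName is deprecated; use NewName instead.", FutureWarning)
        super().__init__(*args, **kwargs)


obj = OldName(value=3)  # still works, but emits a FutureWarning
print(obj.value)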
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''],
'''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_maskformer'''] = ['''MaskFormerFeatureExtractor''']
    _import_structure['''image_processing_maskformer'''] = ['''MaskFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_maskformer'''] = [
'''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MaskFormerForInstanceSegmentation''',
'''MaskFormerModel''',
'''MaskFormerPreTrainedModel''',
]
    _import_structure['''modeling_maskformer_swin'''] = [
'''MaskFormerSwinBackbone''',
'''MaskFormerSwinModel''',
'''MaskFormerSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 188 |
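# The module above follows the transformers lazy-import layout: a dict maps
# submodules to exported names, optional backends are probed before their
# symbols are advertised, and _LazyModule defers the real imports to first
# attribute access. A framework-free sketch of the backend gating
# (importlib.util.find_spec is the standard-library probe):
import importlib.util

_import_structure = {"configuration": ["MyConfig"]}

# Only advertise torch-backed symbols when torch is actually installed.
if importlib.util.find_spec("torch") is not None:
    _import_structure["modeling"] = ["MyModel", "MyPreTrainedModel"]

print(_import_structure)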
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
bert_test_file = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
blip_test_file = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_get_test_to_tester_mapping( self ):
        bert_test_tester_mapping = get_test_to_tester_mapping(bert_test_file )
        blip_test_tester_mapping = get_test_to_tester_mapping(blip_test_file )
        expected_bert_mapping = {'''BertModelTest''': '''BertModelTester'''}
        expected_blip_mapping = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping ) , expected_bert_mapping )
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping ) , expected_blip_mapping )
    def test_get_model_to_test_mapping( self ):
        bert_model_test_mapping = get_model_to_test_mapping(bert_test_file )
        blip_model_test_mapping = get_model_to_test_mapping(blip_test_file )
        expected_bert_mapping = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
        expected_blip_mapping = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping ) , expected_bert_mapping )
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping ) , expected_blip_mapping )
    def test_get_model_to_tester_mapping( self ):
        bert_model_tester_mapping = get_model_to_tester_mapping(bert_test_file )
        blip_model_tester_mapping = get_model_to_tester_mapping(blip_test_file )
        expected_bert_mapping = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
        expected_blip_mapping = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping ) , expected_bert_mapping )
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping ) , expected_blip_mapping ) | 320 | 0 |
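# The expected dictionaries above relate tests, testers, and models in both
# directions. Inverting a test->models mapping into the model->tests form
# checked here is a small dict manipulation, sketched with made-up names:
from collections import defaultdict

test_to_models = {
    "BertModelTest": ["BertModel", "BertForMaskedLM"],
    "BlipModelTest": ["BlipModel"],
}

model_to_tests = defaultdict(list)
for test_name, model_names in test_to_models.items():
    for model_name in model_names:
        model_to_tests[model_name].append(test_name)

print(dict(model_to_tests))
# {'BertModel': ['BertModelTest'], 'BertForMaskedLM': ['BertModelTest'],
#  'BlipModel': ['BlipModelTest']}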
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCAmelCase : int = logging.get_logger("transformers.models.speecht5")
def load_weights( checkpoint, hf_model, config ):
hf_model.apply_weight_norm()
lowercase :str = checkpoint["input_conv.weight_g"]
lowercase :List[str] = checkpoint["input_conv.weight_v"]
lowercase :List[Any] = checkpoint["input_conv.bias"]
for i in range(len(config.upsample_rates ) ):
lowercase :int = checkpoint[F"upsamples.{i}.1.weight_g"]
lowercase :int = checkpoint[F"upsamples.{i}.1.weight_v"]
lowercase :Optional[Any] = checkpoint[F"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
lowercase :Optional[Any] = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
lowercase :Any = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
lowercase :Union[str, Any] = checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
lowercase :int = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
lowercase :List[Any] = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
lowercase :Tuple = checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
lowercase :Any = checkpoint["output_conv.1.weight_g"]
lowercase :str = checkpoint["output_conv.1.weight_v"]
lowercase :Dict = checkpoint["output_conv.1.bias"]
hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint["model"]["generator"], model, config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("Pushing to the hub..." )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
_UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
_UpperCAmelCase : str = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 370 |
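# The conversion above calls apply_weight_norm() before copying tensors
# because weight-normalized layers store each weight as a direction
# (weight_v) plus a magnitude (weight_g), which is exactly how the original
# checkpoint keys are laid out. A small sketch of where those names come
# from (nn.utils.weight_norm is deprecated in favor of the parametrization
# API in recent PyTorch, but matches the style of this script):
import torch.nn as nn

conv = nn.utils.weight_norm(nn.Conv1d(in_channels=4, out_channels=8, kernel_size=3))

# The single `weight` tensor is now derived from two stored parameters.
print(sorted(name for name, _ in conv.named_parameters()))
# ['bias', 'weight_g', 'weight_v']

nn.utils.remove_weight_norm(conv)  # folds g and v back into `weight`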
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_UpperCAmelCase : Optional[int] = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
_UpperCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 158 | 0 |
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
lowercase : Dict = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict( flax_model , pytorch_checkpoint_path , is_sharded , allow_missing_keys=False ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
A : Union[str, Any] = os.path.abspath(snake_case__ )
logger.info(F'Loading PyTorch weights from {pt_path}' )
A : Any = torch.load(snake_case__ , map_location='''cpu''' )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
A : List[str] = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
A : Any = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ )
return flax_state_dict
def rename_key_and_reshape_tensor( pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix , ):
    '''simple docstring'''
    def is_key_or_prefix_key_in_dict(key ) -> bool:
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
# layer norm
A : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
A : Tuple = pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
A : Dict = pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# embedding
A : Any = pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
A : Optional[Any] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ):
A : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
A : Optional[int] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ):
A : str = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
A : Dict = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
A : List[Any] = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
A : Dict = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
A : List[Any] = pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
A : List[str] = pt_tuple_key[-2] + '''_v'''
if name is not None:
A : int = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax( pt_state_dict , flax_model ):
'''simple docstring'''
A : Dict = {k: v.numpy() for k, v in pt_state_dict.items()}
A : int = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
A : List[str] = flax_model.params['''params''']
else:
A : Dict = flax_model.params
A : List[Any] = flatten_dict(snake_case__ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
A : List[str] = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(snake_case__ )
A : int = {}
A : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
A : int = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A : str = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
A : Union[str, Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
A : Any = pt_tuple_key[1:]
# Correctly rename weight parameters
A, A : Dict = rename_key_and_reshape_tensor(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# add model prefix if necessary
A : Any = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
A : int = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
A : Tuple = jnp.asarray(snake_case__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(snake_case__ , snake_case__ )
continue
# also add unexpected weight so that warning is thrown
A : List[str] = jnp.asarray(snake_case__ )
else:
# also add unexpected weight so that warning is thrown
A : Optional[Any] = jnp.asarray(snake_case__ )
return unflatten_dict(snake_case__ )
def convert_pytorch_sharded_state_dict_to_flax( shard_filenames , flax_model ):
'''simple docstring'''
import torch
# Load the index
A : Union[str, Any] = {}
for shard_file in shard_filenames:
# load using msgpack utils
A : List[str] = torch.load(snake_case__ )
A : int = {k: v.numpy() for k, v in pt_state_dict.items()}
A : Tuple = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
A : Optional[int] = flax_model.params['''params''']
A : List[Any] = flatten_dict(snake_case__ )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
A : Dict = flax_model.params
A : Tuple = flatten_dict(snake_case__ )
A : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
A : List[str] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A : int = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
A : List[str] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
A : Union[str, Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
A, A : Any = rename_key_and_reshape_tensor(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# add model prefix if necessary
A : int = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
A : int = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
A : Optional[int] = jnp.asarray(snake_case__ )
continue
if "var" in flax_key[-1]:
A : Optional[int] = jnp.asarray(snake_case__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(snake_case__ , snake_case__ )
continue
# also add unexpected weight so that warning is thrown
A : Optional[Any] = jnp.asarray(snake_case__ )
else:
# also add unexpected weight so that warning is thrown
A : Optional[Any] = jnp.asarray(snake_case__ )
return unflatten_dict(snake_case__ )
def load_flax_checkpoint_in_pytorch_model( model , flax_checkpoint_path ):
'''simple docstring'''
A : Dict = os.path.abspath(snake_case__ )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
A : List[str] = getattr(snake_case__ , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(snake_case__ , '''rb''' ) as state_f:
try:
A : int = from_bytes(snake_case__ , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ )
def load_flax_weights_in_pytorch_model( pt_model , flax_state ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
    A : List[str] = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(A ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
        A : Optional[Any] = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
A : Union[str, Any] = flatten_dict(snake_case__ )
A : List[Any] = pt_model.state_dict()
A : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
A : Tuple = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
A : int = []
A : Any = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
A : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix
A : int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
A : List[str] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
A : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict:
# conv layer
A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',)
A : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict:
# linear layer
A : Tuple = flax_key_tuple[:-1] + ('''weight''',)
A : Tuple = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
A : Tuple = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
A : Tuple = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
A : List[Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
A : Union[str, Any] = '''.'''.join(snake_case__ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
A : int = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
A : Optional[int] = key.split('''.''' )
A : Dict = None
if key_components[-3::2] == ["parametrizations", "original0"]:
A : List[str] = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
A : List[Any] = key_components[-2] + '''_v'''
if name is not None:
A : str = key_components[:-3] + [name]
A : Optional[Any] = '''.'''.join(snake_case__ )
A : Optional[Any] = key
if flax_key in special_pt_names:
A : Optional[Any] = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
A : Dict = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor
A : Dict = torch.from_numpy(snake_case__ )
# remove from missing keys
missing_keys.remove(snake_case__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(snake_case__ )
pt_model.load_state_dict(snake_case__ )
# re-transform missing_keys to list
A : List[Any] = list(snake_case__ )
if len(snake_case__ ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(snake_case__ ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
''' use it for predictions and inference.''' )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
'''If your task is similar to the task the model of the checkpoint was trained on, '''
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
| 3 |
| 3 | 1 |
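# A minimal sketch of the key-renaming rules applied in the row above, isolated
# from the state-dict bookkeeping. The helper name `rename_flax_param` is
# hypothetical (not a transformers API); the rules themselves come from the row.
import numpy as np

def rename_flax_param(flax_key_tuple, flax_tensor):
    """Map one Flax parameter name/tensor to its PyTorch counterpart."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4:
        # Conv kernel: Flax stores (H, W, in, out); PyTorch expects (out, in, H, W).
        return flax_key_tuple[:-1] + ("weight",), np.transpose(flax_tensor, (3, 2, 0, 1))
    if flax_key_tuple[-1] == "kernel":
        # Linear kernel: Flax stores (in, out); PyTorch Linear stores (out, in).
        return flax_key_tuple[:-1] + ("weight",), flax_tensor.T
    if flax_key_tuple[-1] in ("scale", "embedding"):
        # LayerNorm scale and embedding tables both map to `weight`.
        return flax_key_tuple[:-1] + ("weight",), flax_tensor
    if "mean" in flax_key_tuple[-1]:
        return flax_key_tuple[:-1] + ("running_mean",), flax_tensor  # batch-norm stat
    if "var" in flax_key_tuple[-1]:
        return flax_key_tuple[:-1] + ("running_var",), flax_tensor  # batch-norm stat
    return flax_key_tuple, flax_tensor

key, tensor = rename_flax_param(("dense", "kernel"), np.ones((4, 8)))
assert key == ("dense", "weight") and tensor.shape == (8, 4)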
"""simple docstring"""
import re
def lowerCamelCase_( dna ) -> str:
    '''Return the complementary strand of a DNA sequence (A<->T, C<->G).'''
    if len(re.findall("[ATCG]" , dna ) ) != len(dna ):
        raise ValueError("Invalid Strand" )
    return dna.translate(dna.maketrans("ATCG" , "TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 340 |
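# A hedged variant of the complement function above: build the translation table
# once at module scope and validate with a single full-string match. The name
# `complement` is assumed; behavior matches the row for non-empty input
# (`+` rejects an empty strand, unlike the row's length check).
import re

_COMPLEMENT = str.maketrans("ATCG", "TAGC")

def complement(dna: str) -> str:
    """Return the complementary strand, rejecting anything outside A/T/C/G."""
    if re.fullmatch(r"[ATCG]+", dna) is None:
        raise ValueError("Invalid Strand")
    return dna.translate(_COMPLEMENT)

assert complement("ATCG") == "TAGC"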
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCAmelCase : str = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
for attribute in key.split("." ):
_lowerCamelCase : Tuple = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
_lowerCamelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
_lowerCamelCase : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
_lowerCamelCase : Tuple = value
elif weight_type == "weight_g":
_lowerCamelCase : List[str] = value
elif weight_type == "weight_v":
_lowerCamelCase : List[Any] = value
elif weight_type == "bias":
_lowerCamelCase : str = value
elif weight_type == "running_mean":
_lowerCamelCase : Optional[int] = value
elif weight_type == "running_var":
_lowerCamelCase : Optional[Any] = value
elif weight_type == "num_batches_tracked":
_lowerCamelCase : int = value
elif weight_type == "inv_freq":
_lowerCamelCase : List[str] = value
else:
_lowerCamelCase : Optional[Any] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : Dict = []
_lowerCamelCase : Optional[Any] = fairseq_model.state_dict()
_lowerCamelCase : List[Any] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase : Dict = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
_lowerCamelCase : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase : Dict = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_lowerCamelCase : int = True
if "*" in mapped_key:
_lowerCamelCase : Tuple = name.split(_lowerCamelCase )[0].split("." )[-2]
_lowerCamelCase : int = mapped_key.replace("*" , _lowerCamelCase )
if "pos_bias_u" in name:
_lowerCamelCase : int = None
elif "pos_bias_v" in name:
_lowerCamelCase : Any = None
elif "weight_g" in name:
_lowerCamelCase : Any = "weight_g"
elif "weight_v" in name:
_lowerCamelCase : Any = "weight_v"
elif "bias" in name:
_lowerCamelCase : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase : Dict = "weight"
elif "running_mean" in name:
_lowerCamelCase : str = "running_mean"
elif "inv_freq" in name:
_lowerCamelCase : List[Any] = "inv_freq"
elif "running_var" in name:
_lowerCamelCase : Tuple = "running_var"
elif "num_batches_tracked" in name:
_lowerCamelCase : str = "num_batches_tracked"
else:
_lowerCamelCase : Dict = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : int = full_name.split("conv_layers." )[-1]
_lowerCamelCase : List[Any] = name.split("." )
_lowerCamelCase : Union[str, Any] = int(items[0] )
_lowerCamelCase : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
_lowerCamelCase : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
_lowerCamelCase : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
_lowerCamelCase : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
_lowerCamelCase : Optional[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> Dict:
'''simple docstring'''
if config_path is not None:
_lowerCamelCase : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" )
else:
_lowerCamelCase : Dict = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
_lowerCamelCase : List[Any] = "rotary"
if is_finetuned:
if dict_path:
_lowerCamelCase : Dict = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCamelCase : Optional[int] = target_dict.pad_index
_lowerCamelCase : Dict = target_dict.bos_index
_lowerCamelCase : Optional[Any] = target_dict.eos_index
_lowerCamelCase : str = len(target_dict.symbols )
_lowerCamelCase : int = os.path.join(_lowerCamelCase , "vocab.json" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
_lowerCamelCase : Tuple = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase : List[str] = 0
_lowerCamelCase : List[Any] = 1
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : Optional[int] = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , )
_lowerCamelCase : Tuple = True if config.feat_extract_norm == "layer" else False
_lowerCamelCase : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
_lowerCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
_lowerCamelCase : List[Any] = WavaVecaConformerForCTC(_lowerCamelCase )
else:
_lowerCamelCase : Any = WavaVecaConformerForPreTraining(_lowerCamelCase )
if is_finetuned:
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_lowerCamelCase : List[Any] = argparse.Namespace(task="audio_pretraining" )
_lowerCamelCase : Optional[Any] = fairseq.tasks.setup_task(_lowerCamelCase )
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase )
_lowerCamelCase : Dict = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_lowerCAmelCase : str = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 340 | 1 |
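# A minimal sketch of the "*" wildcard resolution the conversion script above
# relies on: MAPPING values contain "*" where a layer index belongs, and that
# index is parsed out of the fairseq parameter name. The helper name is assumed.
def resolve_mapped_key(mapped_key: str, fairseq_name: str, source_key: str) -> str:
    """Substitute the layer index found in `fairseq_name` into the "*" placeholder."""
    if "*" in mapped_key:
        layer_index = fairseq_name.split(source_key)[0].split(".")[-2]
        mapped_key = mapped_key.replace("*", layer_index)
    return mapped_key

resolved = resolve_mapped_key(
    "encoder.layers.*.self_attn.linear_q",
    "w2v_model.encoder.layers.3.self_attn.linear_q.weight",
    "self_attn.linear_q",
)
assert resolved == "encoder.layers.3.self_attn.linear_q"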
'''simple docstring'''
import math
class SelfOrganizingMap:
    def get_winner(self, weights, sample):
        """Return the index of the cluster prototype closest to the sample."""
        d_a = 0.0
        d_b = 0.0
        for i in range(len(sample)):
            d_a += math.pow((sample[i] - weights[0][i]), 2)
            d_b += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d_a < d_b else 1

    def update(self, weights, sample, j, alpha):
        """Move winning prototype j toward the sample by learning rate alpha."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights

def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(F'Clusters that the test sample belongs to : {winner}')
    print(F'Weights that have been trained : {weights}')
# running the main() function
if __name__ == "__main__":
main()
| 42 |
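# The same competitive-learning winner rule as get_winner above, restated with
# NumPy for an arbitrary number of clusters (a sketch, not part of the row).
import numpy as np

def get_winner_np(weights, sample) -> int:
    """Index of the prototype with the smallest squared Euclidean distance."""
    w = np.asarray(weights, dtype=float)
    s = np.asarray(sample, dtype=float)
    return int(np.argmin(((w - s) ** 2).sum(axis=1)))

assert get_winner_np([[1.0, 0.0], [0.0, 1.0]], [0.9, 0.1]) == 0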
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of the first n natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 42 | 1 |
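# A brute-force cross-check of the closed forms used above
# (sum of squares = n(n+1)(2n+1)/6, square of sum = (n(n+1)/2)^2).
def solution_bruteforce(n: int = 100) -> int:
    return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))

assert solution_bruteforce(10) == 2640  # the worked example from Project Euler 6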
from manim import *
class lowerCAmelCase_ ( lowerCamelCase__ ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
snake_case_ = Rectangle(height=0.5 , width=0.5 )
snake_case_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case_ = Rectangle(height=0.25 , width=0.25 )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
snake_case_ = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
snake_case_ = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
snake_case_ = Text('''CPU''' , font_size=24 )
snake_case_ = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_UpperCAmelCase )
snake_case_ = [mem.copy() for i in range(4 )]
snake_case_ = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
snake_case_ = Text('''GPU''' , font_size=24 )
snake_case_ = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_UpperCAmelCase )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
snake_case_ = Text('''Model''' , font_size=24 )
snake_case_ = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_UpperCAmelCase )
snake_case_ = []
snake_case_ = []
for i, rect in enumerate(_UpperCAmelCase ):
snake_case_ = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 )
target.move_to(_UpperCAmelCase )
model_arr.append(_UpperCAmelCase )
snake_case_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_UpperCAmelCase )
self.add(*_UpperCAmelCase , *_UpperCAmelCase )
snake_case_ = [meta_mem.copy() for i in range(6 )]
snake_case_ = [meta_mem.copy() for i in range(6 )]
snake_case_ = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
snake_case_ = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
snake_case_ = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
snake_case_ = Text('''Disk''' , font_size=24 )
snake_case_ = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case_ = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_UpperCAmelCase )
snake_case_ = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase ) )
snake_case_ = Square(0.3 )
input.set_fill(_UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 )
self.play(Write(_UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 )
self.play(MoveToTarget(_UpperCAmelCase ) )
self.play(FadeOut(_UpperCAmelCase ) )
snake_case_ = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
snake_case_ = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) )
snake_case_ = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02}
self.play(
Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
snake_case_ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
snake_case_ = AnimationGroup(
FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
snake_case_ = 0.7
self.play(
Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
snake_case_ = a_c
snake_case_ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , )
snake_case_ = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) )
self.wait() | 351 |
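# The core Manim idiom the scene above repeats: snapshot a mobject with
# generate_target(), mutate the snapshot, then animate toward it. A minimal
# self-contained scene under that assumption; the class name is illustrative.
from manim import RIGHT, MoveToTarget, Scene, Square

class MoveDemo(Scene):
    def construct(self):
        box = Square(0.3)
        self.add(box)
        box.generate_target()         # copy stored on box.target
        box.target.shift(RIGHT * 2)   # edit the copy, not the live mobject
        self.play(MoveToTarget(box))  # interpolate box toward box.target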
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure) | 267 | 0 |
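# The lazy-import pattern above in miniature: a PEP 562 module-level
# __getattr__ defers the real import until first attribute access. This is a
# plain-Python sketch, not the transformers _LazyModule internals.
import importlib

_import_structure = {"json": ["dumps", "loads"]}

def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")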
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ = TypeVar("""T""")
class A__ ( Generic[T] ):
lowerCAmelCase__ : deque[T] # Cache store of keys
lowerCAmelCase__ : set[T] # References of the keys in cache
lowerCAmelCase__ : int = 10 # Maximum capacity of cache
def __init__( self : Optional[int] , _UpperCAmelCase : int ) -> None:
"""simple docstring"""
__lowercase = deque()
__lowercase = set()
if not n:
__lowercase = sys.maxsize
elif n < 0:
raise ValueError('n should be an integer greater than 0.' )
else:
__lowercase = n
def a__ ( self : int , _UpperCAmelCase : T ) -> None:
"""simple docstring"""
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
__lowercase = self.dq_store.pop()
self.key_reference.remove(_UpperCAmelCase )
else:
self.dq_store.remove(_UpperCAmelCase )
self.dq_store.appendleft(_UpperCAmelCase )
self.key_reference.add(_UpperCAmelCase )
def a__ ( self : Union[str, Any] ) -> None:
"""simple docstring"""
for k in self.dq_store:
print(_UpperCAmelCase )
def __repr__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ = LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 325 |
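# An alternative to the deque+set cache above: since Python 3.7 a plain dict
# preserves insertion order, so refer() avoids the O(n) deque.remove() call.
# The class name is illustrative, not taken from the row.
class LRUCacheDict:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: dict = {}

    def refer(self, key) -> None:
        if key in self.store:
            self.store.pop(key)                     # O(1), unlike deque.remove
        elif len(self.store) >= self.capacity:
            self.store.pop(next(iter(self.store)))  # evict the oldest entry
        self.store[key] = True                      # newest entry goes last

cache = LRUCacheDict(2)
for k in ["A", "B", "A", "C"]:
    cache.refer(k)
assert list(cache.store) == ["A", "C"]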
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A__ :
def __init__( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any]=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Optional[int]=37 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : str=5_12 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : Optional[int]=3 , _UpperCAmelCase : Any=4 , _UpperCAmelCase : List[Any]=None , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = 13
__lowercase = 7
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = 99
__lowercase = 3_84
__lowercase = 2
__lowercase = 4
__lowercase = 37
__lowercase = 'gelu'
__lowercase = 0.1
__lowercase = 0.1
__lowercase = 5_12
__lowercase = 16
__lowercase = 2
__lowercase = 0.02
__lowercase = 3
__lowercase = 4
__lowercase = 1_28
__lowercase = 2
__lowercase = 9
__lowercase = 1
__lowercase = None
def a__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = TFConvBertModel(config=_UpperCAmelCase )
__lowercase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowercase = [input_ids, input_mask]
__lowercase = model(_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = TFConvBertForMaskedLM(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Dict:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = TFConvBertForSequenceClassification(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = TFConvBertForMultipleChoice(config=_UpperCAmelCase )
__lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = TFConvBertForTokenClassification(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
__lowercase = TFConvBertForQuestionAnswering(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
        (
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
        ) = config_and_inputs
__lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : List[str] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ : List[str] = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ : List[str] = False
lowerCAmelCase__ : int = False
lowerCAmelCase__ : List[str] = False
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase = TFConvBertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a__ ( self : int ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = True
if hasattr(_UpperCAmelCase , 'use_cache' ):
__lowercase = True
__lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
for model_class in self.all_model_classes:
__lowercase = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = model_class(_UpperCAmelCase )
__lowercase = len(model(_UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase )
__lowercase = os.path.join(_UpperCAmelCase , 'saved_model' , '1' )
__lowercase = tf.keras.models.load_model(_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
if self.is_encoder_decoder:
__lowercase = outputs['encoder_hidden_states']
__lowercase = outputs['encoder_attentions']
else:
__lowercase = outputs['hidden_states']
__lowercase = outputs['attentions']
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
__lowercase = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_UpperCAmelCase )
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
def check_decoder_attentions_output(_UpperCAmelCase : int ):
__lowercase = len(_UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
__lowercase = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase : Union[str, Any] ):
__lowercase = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__lowercase = len(_UpperCAmelCase )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
if self.is_encoder_decoder:
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_decoder_attentions_output(_UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
# Check attention is always last and order is fine
__lowercase = True
__lowercase = True
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
@require_tf
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__lowercase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase = model(_UpperCAmelCase )[0]
__lowercase = [1, 6, 7_68]
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 )
| 325 | 1 |
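# The save/reload equivalence pattern the TF tests above repeat, isolated into
# one hedged helper; the model and inputs are caller-supplied fixtures, not the
# ConvBERT ones, and the helper name is assumed.
import tempfile
import numpy as np

def assert_save_load_roundtrip(model_cls, model, inputs, atol=1e-5):
    before = model(**inputs)[0].numpy()
    with tempfile.TemporaryDirectory() as tmpdirname:
        model.save_pretrained(tmpdirname)
        reloaded = model_cls.from_pretrained(tmpdirname)
    after = reloaded(**inputs)[0].numpy()
    assert np.amax(np.abs(before - after)) <= atol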
"""simple docstring"""
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class __magic_name__ :
'''simple docstring'''
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
pass
def _lowerCAmelCase ( self ):
"""simple docstring"""
pass
def _lowerCAmelCase ( self ):
"""simple docstring"""
pass
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a=None , **_a ):
"""simple docstring"""
lowerCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(_a , _a )
lowerCamelCase = TFVisionTextDualEncoderModel(_a )
lowerCamelCase = model(input_ids=_a , pixel_values=_a , attention_mask=_a )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a=None , **_a ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.get_vision_text_model(_a , _a )
lowerCamelCase = TFVisionTextDualEncoderModel(vision_model=_a , text_model=_a )
lowerCamelCase = model(input_ids=_a , pixel_values=_a , attention_mask=_a )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a=None , **_a ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.get_vision_text_model(_a , _a )
lowerCamelCase = {"""vision_model""": vision_model, """text_model""": text_model}
lowerCamelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_a )
lowerCamelCase = model(input_ids=_a , pixel_values=_a , attention_mask=_a )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a=None , **_a ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.get_vision_text_model(_a , _a )
lowerCamelCase = TFVisionTextDualEncoderModel(vision_model=_a , text_model=_a )
lowerCamelCase = model(input_ids=_a , pixel_values=_a , attention_mask=_a )
lowerCamelCase = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_a )
lowerCamelCase = TFVisionTextDualEncoderModel.from_pretrained(_a )
lowerCamelCase = model(input_ids=_a , pixel_values=_a , attention_mask=_a )
lowerCamelCase = after_output[0].numpy()
lowerCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_a , 1e-5 )
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a=None , **_a ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.get_vision_text_model(_a , _a )
lowerCamelCase = TFVisionTextDualEncoderModel(vision_model=_a , text_model=_a )
lowerCamelCase = model(
input_ids=_a , pixel_values=_a , attention_mask=_a , output_attentions=_a )
lowerCamelCase = output.vision_model_output.attentions
self.assertEqual(len(_a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase = to_atuple(vision_model.config.image_size )
lowerCamelCase = to_atuple(vision_model.config.patch_size )
lowerCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCamelCase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCamelCase = output.text_model_output.attentions
self.assertEqual(len(_a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowerCAmelCase ( self , _a , _a , _a ):
"""simple docstring"""
lowerCamelCase = np.abs((a - b) ).max()
self.assertLessEqual(_a , _a , f'Difference between torch and flax is {diff} (>= {tol}).' )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.prepare_config_and_inputs()
self.check_save_load(**_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_a )
@slow
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.get_pretrained_model_and_inputs()
lowerCamelCase = model_a(**_a )
lowerCamelCase = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_a )
lowerCamelCase = TFVisionTextDualEncoderModel.from_pretrained(_a )
lowerCamelCase = model_a(**_a )
lowerCamelCase = after_outputs[0].numpy()
lowerCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_a , 1e-5 )
@require_tf
class __magic_name__ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
lowerCamelCase = 13
lowerCamelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCamelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCamelCase = random_attention_mask([batch_size, 4] )
lowerCamelCase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
lowerCamelCase = TFViTModel(_a , name="""vision_model""" )
lowerCamelCase = TFBertModel(_a , name="""text_model""" )
return vision_model, text_model
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = TFViTModelTester(self )
lowerCamelCase = TFBertModelTester(self )
lowerCamelCase = vit_model_tester.prepare_config_and_inputs()
lowerCamelCase = bert_model_tester.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase = vision_config_and_inputs
        (
            lowerCamelCase,
            lowerCamelCase,
            lowerCamelCase,
            lowerCamelCase,
            lowerCamelCase,
            lowerCamelCase,
            lowerCamelCase,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __magic_name__ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
lowerCamelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
lowerCamelCase = 13
lowerCamelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCamelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCamelCase = random_attention_mask([batch_size, 4] )
lowerCamelCase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a=None , **_a ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.get_vision_text_model(_a , _a )
lowerCamelCase = TFVisionTextDualEncoderModel(vision_model=_a , text_model=_a )
lowerCamelCase = model(
input_ids=_a , pixel_values=_a , attention_mask=_a , output_attentions=_a )
lowerCamelCase = output.vision_model_output.attentions
self.assertEqual(len(_a ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCamelCase = to_atuple(vision_model.config.image_size )
lowerCamelCase = to_atuple(vision_model.config.patch_size )
lowerCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCamelCase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCamelCase = output.text_model_output.attentions
self.assertEqual(len(_a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
lowerCamelCase = TFDeiTModel(_a , name="""vision_model""" )
lowerCamelCase = TFRobertaModel(_a , name="""text_model""" )
return vision_model, text_model
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = TFDeiTModelTester(self )
lowerCamelCase = TFRobertaModelTester(self )
lowerCamelCase = vit_model_tester.prepare_config_and_inputs()
lowerCamelCase = bert_model_tester.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase = vision_config_and_inputs
        (
            lowerCamelCase,
            lowerCamelCase,
            lowerCamelCase,
            lowerCamelCase,
            lowerCamelCase,
            lowerCamelCase,
            lowerCamelCase,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __magic_name__ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
lowerCamelCase = 13
lowerCamelCase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
lowerCamelCase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
lowerCamelCase = random_attention_mask([batch_size, 4] )
lowerCamelCase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
lowerCamelCase = TFCLIPVisionModel(_a , name="""vision_model""" )
lowerCamelCase = TFBertModel(_a , name="""text_model""" )
return vision_model, text_model
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = TFCLIPVisionModelTester(self )
lowerCamelCase = TFBertModelTester(self )
lowerCamelCase = clip_model_tester.prepare_config_and_inputs()
lowerCamelCase = bert_model_tester.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase = vision_config_and_inputs
        (
            lowerCamelCase,
            lowerCamelCase,
            lowerCamelCase,
            lowerCamelCase,
            lowerCamelCase,
            lowerCamelCase,
            lowerCamelCase,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_a )
lowerCamelCase = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
lowerCamelCase = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_a , padding=_a , return_tensors="""np""" )
lowerCamelCase = model(**_a )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
lowerCamelCase = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _a , atol=1e-3 ) )
| 168 |
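# The sequence-length arithmetic behind the attention-shape checks above:
# a ViT-style encoder sees (image_size/patch_size)^2 patches plus its special
# tokens (1 for ViT's [CLS], 2 for DeiT's [CLS] + distillation token).
def vision_seq_len(image_size: int, patch_size: int, num_special_tokens: int = 1) -> int:
    return (image_size // patch_size) ** 2 + num_special_tokens

assert vision_seq_len(224, 16) == 197                        # ViT
assert vision_seq_len(224, 16, num_special_tokens=2) == 198  # DeiT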
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __magic_name__ :
'''simple docstring'''
__UpperCamelCase = OPTConfig
__UpperCamelCase = {}
__UpperCamelCase = "gelu"
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=False , _a=99 , _a=16 , _a=2 , _a=4 , _a=4 , _a="gelu" , _a=0.1 , _a=0.1 , _a=20 , _a=2 , _a=1 , _a=0 , _a=16 , _a=16 , ):
"""simple docstring"""
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = seq_length
lowerCamelCase = is_training
lowerCamelCase = use_labels
lowerCamelCase = vocab_size
lowerCamelCase = hidden_size
lowerCamelCase = num_hidden_layers
lowerCamelCase = num_attention_heads
lowerCamelCase = intermediate_size
lowerCamelCase = hidden_act
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = max_position_embeddings
lowerCamelCase = eos_token_id
lowerCamelCase = pad_token_id
lowerCamelCase = bos_token_id
lowerCamelCase = embed_dim
lowerCamelCase = word_embed_proj_dim
lowerCamelCase = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next tokens and extend next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
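    # The check above exercises key/value caching: decoding only the 3 new
    # tokens with `past_key_values` must match a full forward pass over the
    # concatenated sequence, compared on a random output slice within rtol=1e-3.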
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Build the word embedding weights if they do not exist yet,
                # then retry fetching the attribute once the model is built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
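# Hedged illustration: _long_tensor([[0, 2, 5]]) builds a (1, 3) int32
# tf.Tensor, used below to feed hand-picked token ids to the integration tests.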
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
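        # The XLA-compiled path above is given a looser tolerance (4e-2 vs
        # 4e-3) since jit compilation may reorder floating-point operations.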
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"
    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompts without the BOS token are identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 168 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
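# Hedged usage sketch (values are illustrative):
#   config = ViTConfig(image_size=224, patch_size=16)
#   onnx_config = ViTOnnxConfig(config)
#   list(onnx_config.inputs)         # ["pixel_values"], with dynamic batch/channel/H/W axes
#   onnx_config.atol_for_validation  # 1e-4, the export-validation tolerance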
| 20 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__lowerCAmelCase : Any =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__lowerCAmelCase : Optional[int] =" def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class UpperCAmelCase ( unittest.TestCase ):
def UpperCAmelCase_ ( self :Tuple )-> Union[str, Any]:
A__ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , "models/bert/" ) )
A__ = self.transformer_dir
shutil.copy(
os.path.join(lowercase_ , "src/transformers/models/bert/modeling_bert.py" ) , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py" ) , )
def UpperCAmelCase_ ( self :Optional[int] )-> Tuple:
A__ = "src/transformers"
shutil.rmtree(self.transformer_dir )
def UpperCAmelCase_ ( self :List[Any] , lowercase_ :List[str] , lowercase_ :Union[str, Any] , lowercase_ :int , lowercase_ :Tuple=None )-> Optional[Any]:
A__ = comment + F"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
A__ = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result
        A__ = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 )
A__ = black.format_str(lowercase_ , mode=lowercase_ )
A__ = os.path.join(self.transformer_dir , "new_code.py" )
with open(lowercase_ , "w" , newline="\n" ) as f:
f.write(lowercase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowercase_ )
with open(lowercase_ , "r" ) as f:
self.assertTrue(f.read() , lowercase_ )
def UpperCAmelCase_ ( self :str )-> Optional[Any]:
A__ = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self :List[str] )-> Optional[int]:
# Base copy consistency
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , lowercase_ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , lowercase_ ) , )
# Copy consistency with a really long name
A__ = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , F"{long_class_name}LMPredictionHead" , re.sub("Bert" , lowercase_ , lowercase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , lowercase_ , overwrite_result=re.sub("Bert" , "TestModel" , lowercase_ ) , )
def UpperCAmelCase_ ( self :Dict )-> Any:
A__ = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
A__ = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
A__ = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
A__ = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
A__, A__ = check_copies.convert_to_localized_md(
lowercase_ , lowercase_ , localized_readme["format_model_list"] )
self.assertFalse(lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
A__, A__ = check_copies.convert_to_localized_md(
lowercase_ , lowercase_ , localized_readme["format_model_list"] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(lowercase_ )
A__ = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
A__ = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
A__ = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
A__, A__ = check_copies.convert_to_localized_md(
lowercase_ , lowercase_ , localized_readme["format_model_list"] )
# Check if the model link is synchronized.
self.assertEqual(lowercase_ , lowercase_ )
| 237 | 0 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )
    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
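# Hedged usage sketch (the table name and connection string are illustrative):
#   ds = SqlDatasetReader("SELECT * FROM my_table", "sqlite:///data.db").read()
# `con` accepts a SQLAlchemy URI/engine or an open sqlite3 connection.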
class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs
    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written
    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)
    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written
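# Design note: `write()` streams the dataset to SQL in `batch_size` chunks via
# pandas.DataFrame.to_sql; every batch after the first is written with
# if_exists="append", and `num_proc` > 1 fans batches out over a process pool.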
| 5 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_snake_case = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
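# Note on the pattern above: `_LazyModule` replaces this module in
# `sys.modules`, so the torch-backed symbols in the import structure are only
# imported on first attribute access, keeping `import transformers` cheap.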
| 26 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 26 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=3 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=2_2_4 , lowerCamelCase__=1_0_0_0 , lowerCamelCase__=[3, 3, 6, 4] , lowerCamelCase__=[4_8, 5_6, 1_1_2, 2_2_0] , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = num_channels
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = num_labels
_lowerCamelCase = image_size
_lowerCamelCase = layer_depths
_lowerCamelCase = embed_dims
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels )
_lowerCamelCase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=a__ , layer_scale_init_value=1e-5 , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = SwiftFormerModel(config=a__ )
model.to(a__ )
model.eval()
_lowerCamelCase = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self.num_labels
_lowerCamelCase = SwiftFormerForImageClassification(a__ )
model.to(a__ )
model.eval()
_lowerCamelCase = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
_lowerCamelCase = SwiftFormerForImageClassification(a__ )
model.to(a__ )
model.eval()
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self ):
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = self.prepare_config_and_inputs()
_lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_( __a, __a, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Union[str, Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowercase__ : Union[str, Any] = (
{"""feature-extraction""": SwiftFormerModel, """image-classification""": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : Union[str, Any] = False
lowercase__ : Dict = False
lowercase__ : List[str] = False
lowercase__ : Union[str, Any] = False
lowercase__ : Optional[int] = False
def snake_case__ ( self ):
_lowerCamelCase = SwiftFormerModelTester(self )
_lowerCamelCase = ConfigTester(
self , config_class=a__ , has_text_modality=a__ , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(a__ )
_lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(a__ )
_lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def snake_case__ ( self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = SwiftFormerModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
def check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
_lowerCamelCase = model(**self._prepare_for_class(a__ , a__ ) )
_lowerCamelCase = outputs.hidden_states
_lowerCamelCase = 8
self.assertEqual(len(a__ ) , a__ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(a__ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase = True
check_hidden_states_output(a__ , a__ , a__ )
def snake_case__ ( self ):
def _config_zero_init(lowerCamelCase__ ):
_lowerCamelCase = copy.deepcopy(a__ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(a__ , a__ , 1e-10 )
if isinstance(getattr(a__ , a__ , a__ ) , a__ ):
_lowerCamelCase = _config_zero_init(getattr(a__ , a__ ) )
setattr(a__ , a__ , a__ )
return configs_no_init
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = _config_zero_init(a__ )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(config=a__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case__ ( self ):
pass
def lowerCAmelCase_( ) -> str:
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def snake_case__ ( self ):
_lowerCamelCase = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(a__ )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ )
# forward pass
with torch.no_grad():
_lowerCamelCase = model(**a__ )
# verify the logits
_lowerCamelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , a__ )
_lowerCamelCase = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1e-4 ) )
| 359 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : Dict = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 73 | 0 |
'''simple docstring'''

from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Return the maximum sum of non-adjacent elements of `nums`.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    >>> maximum_non_adjacent_sum([])
    0
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
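# Runs in O(n) time and O(1) extra space: each element either extends the best
# sum that excluded its predecessor, or is skipped in favour of that best sum.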
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 |
'''simple docstring'''


def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters produced by the recurrence below, up to `max_perimeter`."""
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0

    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
| 28 | 1 |
def nand_gate(input_1: int, input_2: int) -> int:
    """NAND: return 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
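# Design note: NAND is the negation of AND, so any 0 among the inputs forces a
# 1 output; counting zeros in the input tuple captures exactly that.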
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Any=False , _lowerCAmelCase : Tuple=10 , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : Dict=32 * 8 , _lowerCAmelCase : List[str]=32 * 8 , _lowerCAmelCase : List[Any]=4 , _lowerCAmelCase : Optional[Any]=64 , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_auxiliary_loss
SCREAMING_SNAKE_CASE_ = num_queries
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = min_size
SCREAMING_SNAKE_CASE_ = max_size
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = hidden_dim
SCREAMING_SNAKE_CASE_ = hidden_dim
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowerCAmelCase ) > 0.5
).float()
SCREAMING_SNAKE_CASE_ = (torch.rand((self.batch_size, self.num_labels) , device=_lowerCAmelCase ) > 0.5).long()
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
SCREAMING_SNAKE_CASE_ = self.num_queries
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = [1, 1, 1, 1]
SCREAMING_SNAKE_CASE_ = self.num_channels
SCREAMING_SNAKE_CASE_ = 64
SCREAMING_SNAKE_CASE_ = 128
SCREAMING_SNAKE_CASE_ = self.hidden_dim
SCREAMING_SNAKE_CASE_ = self.hidden_dim
SCREAMING_SNAKE_CASE_ = self.hidden_dim
return config
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple ):
SCREAMING_SNAKE_CASE_ = output.encoder_hidden_states
SCREAMING_SNAKE_CASE_ = output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCAmelCase ) , config.decoder_layers )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Any=False ):
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = MaskaFormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Any ):
SCREAMING_SNAKE_CASE_ = MaskaFormerForUniversalSegmentation(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
def comm_check_on_output(_lowerCAmelCase : List[str] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
comm_check_on_output(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(
pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase )
comm_check_on_output(_lowerCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowercase_ = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = MaskaFormerModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_lowerCAmelCase )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def lowerCAmelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def lowerCAmelCase_ ( self : List[Any] ):
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def lowerCAmelCase_ ( self : Tuple ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowerCAmelCase_ ( self : Any ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCAmelCase_ ( self : int ):
pass
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : Any ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
SCREAMING_SNAKE_CASE_ = MaskaFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE_ = {
'pixel_values': torch.randn((2, 3, *size) , device=_lowerCAmelCase ),
'mask_labels': torch.randn((2, 10, *size) , device=_lowerCAmelCase ),
'class_labels': torch.zeros(2 , 10 , device=_lowerCAmelCase ).long(),
}
SCREAMING_SNAKE_CASE_ = self.model_tester.get_config()
SCREAMING_SNAKE_CASE_ = MaskaFormerForUniversalSegmentation(_lowerCAmelCase ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase , output_attentions=_lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase_ ( self : List[str] ):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ = self.all_model_classes[1]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase ).loss
loss.backward()
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.all_model_classes[1]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase ).to(_lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase__ : Tuple = 1E-4
def UpperCAmelCase_ ( ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self : Optional[int] ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase_ ( self : int ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCAmelCase ).eval()
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
# masks_queries_logits
SCREAMING_SNAKE_CASE_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
SCREAMING_SNAKE_CASE_ = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
SCREAMING_SNAKE_CASE_ = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCAmelCase ).eval()
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = image_processor(
            [np.zeros((3, 800, 1_333)), np.zeros((3, 800, 1_333))], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors='pt', )
SCREAMING_SNAKE_CASE_ = inputs['pixel_values'].to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = [el.to(_lowerCAmelCase ) for el in inputs['mask_labels']]
SCREAMING_SNAKE_CASE_ = [el.to(_lowerCAmelCase ) for el in inputs['class_labels']]
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
self.assertTrue(outputs.loss is not None ) | 210 | 1 |
def nor_gate(input_1: int, input_2: int) -> int:
    """
    NOR: return 1 only when both inputs are 0.

    >>> nor_gate(0, 0)
    1
    >>> nor_gate(0, 1)
    0
    >>> nor_gate(1, 0)
    0
    >>> nor_gate(1, 1)
    0
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 281 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab and merges files, thus also resulting in a larger model due to the large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model ~50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated.
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
snake_case : List[str] = "facebook/wmt19-en-de"
snake_case : Dict = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
snake_case : List[str] = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 281 | 1 |
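# Editor's hedged usage sketch: once the script above has run and the checkpoint
# has been uploaded, the tiny model loads like any FSMT checkpoint (repo id taken
# from the comments above; requires network access to the Hub).
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

tiny_tok = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de")
tiny = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")
print(tiny(**tiny_tok(["Making tiny model"], return_tensors="pt")).logits.shape)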
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline( DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self , value_function: UNet1DModel , unet: UNet1DModel , scheduler: DDPMScheduler , env , ) -> None:
        '''simple docstring'''
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except: # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except: # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize( self , x_in , key ):
        '''simple docstring'''
        return (x_in - self.means[key]) / self.stds[key]
    def de_normalize( self , x_in , key ):
        '''simple docstring'''
        return x_in * self.stds[key] + self.means[key]
    def to_torch( self , x_in ):
        '''simple docstring'''
        if type(x_in ) is dict:
            return {k: self.to_torch(v ) for k, v in x_in.items()}
        elif torch.is_tensor(x_in ):
            return x_in.to(self.unet.device )
        return torch.tensor(x_in , device=self.unet.device )
    def reset_xa( self , x_in , cond , act_dim ):
        '''simple docstring'''
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion( self , x , conditions , n_guide_steps , scale ):
        '''simple docstring'''
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,) , i , device=self.unet.device , dtype=torch.long )
            for _ in range(n_guide_steps ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0 , 2 , 1 ) , timesteps ).sample
                    grad = torch.autograd.grad([y.sum()] , [x] )[0]
                    posterior_variance = self.scheduler._get_variance(i )
                    model_std = torch.exp(0.5 * posterior_variance )
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_xa(x , conditions , self.action_dim )
            prev_x = self.unet(x.permute(0 , 2 , 1 ) , timesteps ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x , i , x , predict_epsilon=False )['''prev_sample''']
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_xa(x , conditions , self.action_dim )
            x = self.to_torch(x )
        return x, y
    def __call__( self , obs , batch_size=64 , planning_horizon=32 , n_guide_steps=2 , scale=0.1 ):
        '''simple docstring'''
        # normalize the observations and create batch dimension
        obs = self.normalize(obs , '''observations''' )
        obs = obs[None].repeat(batch_size , axis=0 )
        conditions = {0: self.to_torch(obs )}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        xa = randn_tensor(shape , device=self.unet.device )
        x = self.reset_xa(xa , conditions , self.action_dim )
        x = self.to_torch(x )
        # run the diffusion process
        x , y = self.run_diffusion(x , conditions , n_guide_steps , scale )
        # sort output trajectories by value
        sorted_idx = y.argsort(0 , descending=True ).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions , key='''actions''' )
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0 , batch_size )
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
| 334 |
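# Editor's self-contained sketch of the guidance step inside run_diffusion above:
# nudge a sample along the gradient of a value function, scaled by the model's
# posterior std. The quadratic "value" stands in for value_function(...).sample.
import torch

def guide_step(x: torch.Tensor, scale: float, posterior_variance: torch.Tensor) -> torch.Tensor:
    with torch.enable_grad():
        x = x.detach().requires_grad_()
        y = -(x ** 2).sum()
        grad = torch.autograd.grad([y], [x])[0]
    model_std = torch.exp(0.5 * posterior_variance)
    return (x + scale * model_std * grad).detach()

x = guide_step(torch.randn(4, 8), scale=0.1, posterior_variance=torch.tensor(0.5))
print(x.shape)  # torch.Size([4, 8])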
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float] ) -> np.ndarray:
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 334 | 1 |
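# Editor's companion sketch: the same vectorised numpy pattern extends to ReLU
# variants such as leaky ReLU.
import numpy as np

def leaky_relu(vector: np.ndarray, alpha: float = 0.01) -> np.ndarray:
    return np.where(vector > 0, vector, alpha * vector)

print(leaky_relu(np.array([-1.0, 0.0, 5.0])))  # --> [-0.01  0.    5.  ]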
def merge_sort( collection : list ):
    def merge(left : list , right : list ) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right
        return list(_merge() )
    if len(collection ) <= 1:
        return collection
    mid = len(collection ) // 2
    return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 216 |
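# Editor's alternative sketch: the hand-written generator in merge() performs the
# same two-way merge as heapq.merge, so the sort can also be written as:
import heapq

def merge_sort_heapq(collection: list) -> list:
    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return list(heapq.merge(merge_sort_heapq(collection[:mid]), merge_sort_heapq(collection[mid:])))

assert merge_sort_heapq([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]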
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ =logging.get_logger(__name__)
def load_tfa_weights_in_bert( model , config , tf_checkpoint_path ):
    tf_path = os.path.abspath(tf_checkpoint_path )
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}" )
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path )
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split('''/''' )
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}" )
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}" )
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith('''layer_with_weights''' ):
                depth += 1
            else:
                break
        layer_depth.append(depth )
        # read data
        array = tf.train.load_variable(tf_path , full_name )
        names.append('''/'''.join(name ) )
        arrays.append(array )
    logger.info(f"Read a total of {len(arrays ):,} layers" )
    # Sanity check
    if len(set(layer_depth ) ) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth ) )})" )
    layer_depth = list(set(layer_depth ) )[0]
    if layer_depth != 1:
        raise ValueError(
            '''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
            ''' heads.''' )
    # convert layers
    logger.info('''Converting weights...''' )
    for full_name, array in zip(names , arrays ):
        name = full_name.split('''/''' )
        pointer = model
        trace = []
        for i, m_name in enumerate(name ):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith('''layer_with_weights''' ):
                layer_num = int(m_name.split('''-''' )[-1] )
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(['''embeddings''', '''LayerNorm'''] )
                    pointer = getattr(pointer , '''embeddings''' )
                    pointer = getattr(pointer , '''LayerNorm''' )
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
                    pointer = getattr(pointer , '''encoder''' )
                    pointer = getattr(pointer , '''layer''' )
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(['''pooler''', '''dense'''] )
                    pointer = getattr(pointer , '''pooler''' )
                    pointer = getattr(pointer , '''dense''' )
            elif m_name == "embeddings":
                trace.append('''embeddings''' )
                pointer = getattr(pointer , '''embeddings''' )
                if layer_num == 0:
                    trace.append('''word_embeddings''' )
                    pointer = getattr(pointer , '''word_embeddings''' )
                elif layer_num == 1:
                    trace.append('''position_embeddings''' )
                    pointer = getattr(pointer , '''position_embeddings''' )
                elif layer_num == 2:
                    trace.append('''token_type_embeddings''' )
                    pointer = getattr(pointer , '''token_type_embeddings''' )
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}" )
                trace.append('''weight''' )
                pointer = getattr(pointer , '''weight''' )
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(['''attention''', '''self'''] )
                pointer = getattr(pointer , '''attention''' )
                pointer = getattr(pointer , '''self''' )
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
                pointer = getattr(pointer , '''attention''' )
                pointer = getattr(pointer , '''output''' )
                pointer = getattr(pointer , '''LayerNorm''' )
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(['''attention''', '''output''', '''dense'''] )
                pointer = getattr(pointer , '''attention''' )
                pointer = getattr(pointer , '''output''' )
                pointer = getattr(pointer , '''dense''' )
            elif m_name == "_output_dense":
                # output dense
                trace.extend(['''output''', '''dense'''] )
                pointer = getattr(pointer , '''output''' )
                pointer = getattr(pointer , '''dense''' )
            elif m_name == "_output_layer_norm":
                # output dense
                trace.extend(['''output''', '''LayerNorm'''] )
                pointer = getattr(pointer , '''output''' )
                pointer = getattr(pointer , '''LayerNorm''' )
            elif m_name == "_key_dense":
                # attention key
                trace.append('''key''' )
                pointer = getattr(pointer , '''key''' )
            elif m_name == "_query_dense":
                # attention query
                trace.append('''query''' )
                pointer = getattr(pointer , '''query''' )
            elif m_name == "_value_dense":
                # attention value
                trace.append('''value''' )
                pointer = getattr(pointer , '''value''' )
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(['''intermediate''', '''dense'''] )
                pointer = getattr(pointer , '''intermediate''' )
                pointer = getattr(pointer , '''dense''' )
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.append('''output''' )
                pointer = getattr(pointer , '''output''' )
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append('''bias''' )
                pointer = getattr(pointer , '''bias''' )
            elif m_name in ["kernel", "gamma"]:
                trace.append('''weight''' )
                pointer = getattr(pointer , '''weight''' )
            else:
                logger.warning(f"Ignored {m_name}" )
        # for certain layers reshape is necessary
        trace = '''.'''.join(trace )
        if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , trace ) or re.match(
            R'''(\S+)\.attention\.output\.dense\.weight''' , trace ):
            array = array.reshape(pointer.data.shape )
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array )
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}" )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}" )
    return model
def convert_tfa_checkpoint_to_pytorch( tf_checkpoint_path , config_path , pytorch_dump_path ):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}..." )
    config = BertConfig.from_json_file(config_path )
    model = BertModel(config )
    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}..." )
    load_tfa_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}..." )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
lowercase__ =argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model (must include filename).',
)
lowercase__ =parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 216 | 1 |
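# Editor's hedged sketch: listing variables is the first step the converter above
# performs; the same call works on any TF2 checkpoint (the path is a placeholder).
import tensorflow as tf

def list_checkpoint_layers(tf_checkpoint_path: str) -> None:
    for name, shape in tf.train.list_variables(tf_checkpoint_path):
        print(f"{name}: {shape}")

# list_checkpoint_layers("/path/to/bert_tf2_checkpoint")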
"""simple docstring"""
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
__lowerCamelCase = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
__lowerCamelCase = "zero2"
__lowerCamelCase = "zero3"
__lowerCamelCase = [ZEROa, ZEROa]
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
A__ = parameterized.to_safe_name('_'.join(str(UpperCamelCase__ ) for x in param.args ) )
return F'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
__lowerCamelCase = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus ):
    @parameterized.expand(params ,name_func=custom_name_func )
    def test_fp32_non_distributed(self ,stage ,model ):
        self.run_and_check(
            stage=stage ,model=model ,distributed=False ,fpaa=False ,)
    @require_torch_multi_gpu
    @parameterized.expand(params ,name_func=custom_name_func )
    def test_fp32_distributed(self ,stage ,model ):
        self.run_and_check(
            stage=stage ,model=model ,distributed=True ,fpaa=False ,)
    @parameterized.expand(params ,name_func=custom_name_func )
    def test_fp16_non_distributed(self ,stage ,model ):
        self.run_and_check(
            stage=stage ,model=model ,distributed=False ,fpaa=True ,)
    @require_torch_multi_gpu
    @parameterized.expand(params ,name_func=custom_name_func )
    def test_fp16_distributed(self ,stage ,model ):
        self.run_and_check(
            stage=stage ,model=model ,distributed=True ,fpaa=True ,)
    def do_checks(self ,output_dir ):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass
    def run_and_check(self ,stage ,model ,eval_steps = 10 ,distributed = True ,fpaa = True ,quality_checks = True ,):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage ,model_name=model_name ,eval_steps=eval_steps ,num_train_epochs=1 ,distributed=distributed ,fpaa=fpaa ,)
        self.do_checks(output_dir )
        return output_dir
    def run_trainer(self ,stage ,model_name ,eval_steps = 10 ,num_train_epochs = 1 ,distributed = True ,fpaa = True ,):
        output_dir = self.get_auto_remove_tmp_dir('./xxx' ,after=True )
        args = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(num_train_epochs )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
A__ = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
A__ = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
A__ = self.get_launcher(__UpperCAmelCase )
A__ = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__UpperCAmelCase ,env=self.get_env() )
return output_dir
    def get_launcher(self ,distributed=False ):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        num_gpus = min(2 ,get_gpu_count() ) if distributed else 1
        return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
| 350 | """simple docstring"""
__lowerCamelCase = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__lowerCamelCase = [{"type": "code", "content": INSTALL_CONTENT}]
__lowerCamelCase = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 154 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
UpperCAmelCase_ = '0.12' # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor( shape , vocab_size , rng=None ):
    '''simple docstring'''
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims ):
        values.append(rng.randint(0 , vocab_size - 1 ) )
    output = np.array(values , dtype=jnp.int32 ).reshape(shape )
    return output
def random_attention_mask( shape , rng=None ):
    '''simple docstring'''
    attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    '''simple docstring'''
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config( self ):
        """simple docstring"""
        config , inputs = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["""input_ids"""].shape[-1] // 2
        input_ids = inputs["""input_ids"""][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids )
        attention_mask = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
UpperCAmelCase__ = False
UpperCAmelCase__ = max_length
UpperCAmelCase__ = 0
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(_UpperCAmelCase )
UpperCAmelCase__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase__ = getattr(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase__ = pt_model_class(_UpperCAmelCase ).eval()
UpperCAmelCase__ = load_flax_weights_in_pytorch_model(_UpperCAmelCase , flax_model.params )
UpperCAmelCase__ = flax_model.generate(_UpperCAmelCase ).sequences
UpperCAmelCase__ = pt_model.generate(torch.tensor(_UpperCAmelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
UpperCAmelCase__ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
UpperCAmelCase__ = False
UpperCAmelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(_UpperCAmelCase )
UpperCAmelCase__ = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCAmelCase )
UpperCAmelCase__ = jit(model.generate )
UpperCAmelCase__ = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
UpperCAmelCase__ = True
UpperCAmelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(_UpperCAmelCase )
UpperCAmelCase__ = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCAmelCase )
UpperCAmelCase__ = jit(model.generate )
UpperCAmelCase__ = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
UpperCAmelCase__ = False
UpperCAmelCase__ = max_length
UpperCAmelCase__ = 2
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(_UpperCAmelCase )
UpperCAmelCase__ = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCAmelCase )
UpperCAmelCase__ = jit(model.generate )
UpperCAmelCase__ = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
UpperCAmelCase__ = False
UpperCAmelCase__ = max_length
UpperCAmelCase__ = 2
UpperCAmelCase__ = 2
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(_UpperCAmelCase )
UpperCAmelCase__ = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
UpperCAmelCase__ = True
UpperCAmelCase__ = max_length
UpperCAmelCase__ = 0.8
UpperCAmelCase__ = 10
UpperCAmelCase__ = 0.3
UpperCAmelCase__ = 1
UpperCAmelCase__ = 8
UpperCAmelCase__ = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(_UpperCAmelCase )
UpperCAmelCase__ = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCAmelCase )
UpperCAmelCase__ = jit(model.generate )
UpperCAmelCase__ = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
UpperCAmelCase__ = max_length
UpperCAmelCase__ = 1
UpperCAmelCase__ = 8
UpperCAmelCase__ = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(_UpperCAmelCase )
UpperCAmelCase__ = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCAmelCase )
UpperCAmelCase__ = jit(model.generate )
UpperCAmelCase__ = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
UpperCAmelCase__ = max_length
UpperCAmelCase__ = 2
UpperCAmelCase__ = 1
UpperCAmelCase__ = 8
UpperCAmelCase__ = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(_UpperCAmelCase )
UpperCAmelCase__ = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCAmelCase )
UpperCAmelCase__ = jit(model.generate )
UpperCAmelCase__ = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase__ = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase__ = False
UpperCAmelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(_UpperCAmelCase )
UpperCAmelCase__ = model.generate(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCAmelCase )
UpperCAmelCase__ = jit(model.generate )
UpperCAmelCase__ = jit_generate(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase__ = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase__ = True
UpperCAmelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(_UpperCAmelCase )
UpperCAmelCase__ = model.generate(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCAmelCase )
UpperCAmelCase__ = jit(model.generate )
UpperCAmelCase__ = jit_generate(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase__ = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase__ = 2
UpperCAmelCase__ = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ = model_class(_UpperCAmelCase )
UpperCAmelCase__ = model.generate(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCAmelCase )
UpperCAmelCase__ = jit(model.generate )
UpperCAmelCase__ = jit_generate(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class FlaxGenerationIntegrationTests( unittest.TestCase ):
    '''simple docstring'''
    def test_validate_generation_inputs( self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
        model = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
        encoder_input_str = """Hello world"""
        input_ids = tokenizer(encoder_input_str , return_tensors="""np""" ).input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError , """do_samples""" ):
            model.generate(input_ids , do_samples=True )
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError , """foo""" ):
            fake_model_kwargs = {"""foo""": """bar"""}
            model.generate(input_ids , **fake_model_kwargs )
| 346 |
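# Editor's minimal sketch of the eager-vs-jit equality check these tests repeat
# for model.generate: any pure function must give identical results under jax.jit.
import jax
import jax.numpy as jnp

def greedy_step(logits):
    return jnp.argmax(logits, axis=-1)

logits = jnp.arange(12.0).reshape(2, 6)
assert greedy_step(logits).tolist() == jax.jit(greedy_step)(logits).tolist()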
'''simple docstring'''
import string
from math import log10
def term_frequency( term: str , document: str ) -> int:
    '''simple docstring'''
    document_without_punctuation = document.translate(
        str.maketrans("""""" , """""" , string.punctuation ) ).replace("""\n""" , """""" )
    tokenize_document = document_without_punctuation.split(""" """ ) # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency( term: str , corpus: str ) -> tuple[int, int]:
    '''simple docstring'''
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("""""" , """""" , string.punctuation ) ) # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("""\n""" )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def inverse_document_frequency( df: int , n: int , smoothing=False ) -> float:
    '''simple docstring'''
    if smoothing:
        if n == 0:
            raise ValueError("""log10(0) is undefined.""" )
        return round(1 + log10(n / (1 + df) ) , 3 )
    if df == 0:
        raise ZeroDivisionError("""df must be > 0""" )
    elif n == 0:
        raise ValueError("""log10(0) is undefined.""" )
    return round(log10(n / df ) , 3 )
def tf_idf( tf: int , idf: int ) -> float:
    '''simple docstring'''
    return round(tf * idf , 3 )
| 346 | 1 |
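# Editor's worked example for the helpers above (restored names): score the term
# "cat" against a two-document corpus separated by newlines.
corpus = "the cat sat\nthe dog ran"
tf = term_frequency("cat", "the cat sat")   # 1 occurrence
df, n = document_frequency("cat", corpus)   # (1, 2)
idf = inverse_document_frequency(df, n)     # round(log10(2 / 1), 3) = 0.301
print(tf_idf(tf, idf))                      # 0.301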
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase: Tuple = "▁"
_lowercase: Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self ):
        """simple docstring"""
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self ):
        """simple docstring"""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "[MASK]" )
        self.assertEqual(len(vocab_keys ) , 1004 )
    def test_vocab_size(self ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def test_rust_and_python_full_tokenizers(self ):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_full_tokenizer(self ):
        """simple docstring"""
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
    def big_tokenizer(self ):
"""simple docstring"""
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
    def test_tokenization_base_easy_symbols(self ):
        """simple docstring"""
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols(self ):
        """simple docstring"""
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self ):
        """simple docstring"""
        import torch
        from transformers import BigBirdConfig, BigBirdModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = " ".join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="pt" , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=False )
        config = BigBirdConfig(attention_type="original_full" )
        model = BigBirdModel(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
    def test_special_tokens(self ):
        """simple docstring"""
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = {"input_ids": [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 368 |
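# Editor's hedged usage sketch: the encode/decode round trip exercised by the
# tests above, using the public checkpoint (requires network access to the Hub).
from transformers import BigBirdTokenizer

tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
ids = tok("Hello World!").input_ids
print(ids)            # [65, 18536, 2260, 101, 66] per the easy-symbols test above
print(tok.decode(ids))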
import math
from numpy import inf
from scipy.integrate import quad
def gamma( num : float ) -> float:
    """simple docstring"""
    if num <= 0:
        raise ValueError("math domain error" )
    return quad(integrand , 0 , inf , args=(num) )[0]
def integrand( x : float , z : float ) -> float:
    """simple docstring"""
    return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 71 | 0 |
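# Editor's sanity check for the restored gamma() above: Γ(n) = (n - 1)! for
# positive integers, so compare against math.factorial.
from math import factorial
for n in range(1, 6):
    assert abs(gamma(float(n)) - factorial(n - 1)) < 1e-6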
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json',
    'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json',
}
class XLNetConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = '''xlnet'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
        '''n_token''': '''vocab_size''', # Backward compatibility
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__(self , vocab_size=3_2000 , d_model=1024 , n_layer=24 , n_head=16 , d_inner=4096 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1e-12 , dropout=0.1 , mem_len=512 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(F"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""" )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                """The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"""
                """ instead.""" , FutureWarning , )
            use_mems_eval = kwargs["""use_cache"""]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings(self ):
        logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self , value ):
        raise NotImplementedError(
            F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 258 | '''simple docstring'''
from math import sqrt
def solution( limit: int = 1000000 ) -> int:
    '''simple docstring'''
    num_cuboids = 0
    max_cuboid_size = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'{solution() = }')
| 272 | 0 |
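# Editor's brute-force check of the counting identity used above: for max side M
# and shortest-sides sum s, the number of (a, b) with a <= b <= M and a + b = s
# is min(M, s // 2) - max(1, s - M) + 1.
def count_splits(max_side: int, s: int) -> int:
    return sum(1 for a in range(1, max_side + 1) for b in range(a, max_side + 1) if a + b == s)

for M in range(1, 8):
    for s in range(2, 2 * M + 1):
        assert count_splits(M, s) == min(M, s // 2) - max(1, s - M) + 1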
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase ):
    '''simple docstring'''
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests( unittest.TestCase ):
    '''simple docstring'''
    @property
    def gpu_provider( self ):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''A red cat sitting on a park bench'''
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type='''np''' , )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def test_inference_k_lms( self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''A red cat sitting on a park bench'''
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type='''np''' , )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 35 | def lowerCamelCase_ ( UpperCamelCase__ : list[int], UpperCamelCase__ : list[int], UpperCamelCase__ : int ):
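# Editor's hedged sketch: the provider tuple and session options above are plain
# onnxruntime configuration and can be passed to any InferenceSession; the model
# path below is a placeholder.
import onnxruntime as ort

provider = ("CUDAExecutionProvider", {"gpu_mem_limit": "15000000000", "arena_extend_strategy": "kSameAsRequested"})
options = ort.SessionOptions()
options.enable_mem_pattern = False
# sess = ort.InferenceSession("model.onnx", sess_options=options, providers=[provider])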
'''simple docstring'''
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(UpperCamelCase__ ) )
def lowerCamelCase_ ( UpperCamelCase__ : list[list[int]], UpperCamelCase__ : int, UpperCamelCase__ : list[int], UpperCamelCase__ : int ):
'''simple docstring'''
if index == len(UpperCamelCase__ ):
return True
# Recursive Step
for i in range(UpperCamelCase__ ):
if valid_coloring(graph[index], UpperCamelCase__, UpperCamelCase__ ):
# Color current vertex
UpperCamelCase__ = i
# Validate coloring
if util_color(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, index + 1 ):
return True
# Backtrack
UpperCamelCase__ = -1
return False
def lowerCamelCase_ ( UpperCamelCase__ : list[list[int]], UpperCamelCase__ : int ):
'''simple docstring'''
UpperCamelCase__ = [-1] * len(UpperCamelCase__ )
if util_color(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, 0 ):
return colored_vertices
return []
| 35 | 1 |
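# Editor's usage sketch for the restored color() above: 3-color a triangle with
# a pendant vertex attached (adjacency-matrix form).
graph = [
    [0, 1, 1, 0],
    [1, 0, 1, 0],
    [1, 1, 0, 1],
    [0, 0, 1, 0],
]
print(color(graph, 3))  # a valid assignment such as [0, 1, 2, 0]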
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings( monkeypatch ):
    monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings' , set() )
@pytest.fixture
def mock_hfh( monkeypatch ):
    class MetricMock:
        def __init__( self , metric_id ):
            """simple docstring"""
            self.id = metric_id
    class HfhMock:
        _metrics = [MetricMock(metric_id ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]
        def list_metrics( self ):
            """simple docstring"""
            return self._metrics
    monkeypatch.setattr('datasets.inspect.huggingface_hub' , HfhMock() )
@pytest.mark.parametrize(
    'func, args' , [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))] )
def test_metric_deprecation_warning( func , args , mock_emitted_deprecation_warnings , mock_hfh , tmp_path ):
    if "tmp_path" in args:
        args = tuple(arg if arg != 'tmp_path' else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match='https://huggingface.co/docs/evaluate' ):
        func(*args )
| 42 |
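# Editor's minimal sketch of the monkeypatch pattern used above: a fixture swaps
# a real attribute for a stub, and the patch is undone automatically after each test.
import math
import pytest

@pytest.fixture
def fake_pi(monkeypatch):
    monkeypatch.setattr("math.pi", 3)

def test_fake_pi(fake_pi):
    assert math.pi == 3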
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig ):
    model_type = """xlnet"""
    keys_to_ignore_at_inference = ["""mems"""]
    attribute_map = {
        """n_token""": """vocab_size""", # Backward compatibility
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=3_20_00 , d_model=10_24 , n_layer=24 , n_head=16 , d_inner=40_96 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1E-12 , dropout=0.1 , mem_len=5_12 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(F'\'d_model % n_head\' ({d_model % n_head}) should be equal to 0' )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F'`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})' )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
                ' instead.' , FutureWarning , )
            use_mems_eval = kwargs['use_cache']
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings( self ):
        """simple docstring"""
        logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self , value ):
        """simple docstring"""
        raise NotImplementedError(
            F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
| 42 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester( ConfigTester ):
    def create_and_test_config_common_properties( self ):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """embed_dim""" ) )
        self.parent.assertTrue(hasattr(config , """num_heads""" ) )
class _A :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=64 , __lowerCAmelCase=3 , __lowerCAmelCase=[16, 48, 96] , __lowerCAmelCase=[1, 3, 6] , __lowerCAmelCase=[1, 2, 10] , __lowerCAmelCase=[7, 3, 3] , __lowerCAmelCase=[4, 2, 2] , __lowerCAmelCase=[2, 1, 1] , __lowerCAmelCase=[2, 2, 2] , __lowerCAmelCase=[False, False, True] , __lowerCAmelCase=[0.0, 0.0, 0.0] , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=2 , ):
"""simple docstring"""
lowercase = parent
lowercase = batch_size
lowercase = image_size
lowercase = patch_sizes
lowercase = patch_stride
lowercase = patch_padding
lowercase = is_training
lowercase = use_labels
lowercase = num_labels
lowercase = num_channels
lowercase = embed_dim
lowercase = num_heads
lowercase = stride_kv
lowercase = depth
lowercase = cls_token
lowercase = attention_drop_rate
lowercase = initializer_range
lowercase = layer_norm_eps
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
        return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
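    # Each Cvt stage downsamples with a strided convolution; the expected spatial size follows the usual
    # conv arithmetic, floor((size + 2 * padding - kernel) / stride) + 1, applied once per stage below.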
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
def A__ ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def A__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def A__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def A__ ( self ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def A__ ( self ):
"""simple docstring"""
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def A__ ( self ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def A__ ( self ):
"""simple docstring"""
lowercase = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(__lowerCAmelCase )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(__lowerCAmelCase )
lowercase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
def check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
lowercase = model_class(__lowerCAmelCase )
lowercase = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
lowercase = outputs.hidden_states
lowercase = len(self.model_tester.depth )
self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = TFCvtModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def A__ ( self ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=__lowerCAmelCase , return_tensors="""tf""" )
# forward pass
lowercase = model(**__lowerCAmelCase )
# verify the logits
lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
lowercase = tf.constant([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __lowerCAmelCase , atol=1E-4 ) )
| 32 | """simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase : Tuple ={
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}
    def __init__(self, backbone_config=None, feature_size=256, mask_feature_size=256, hidden_dim=256, encoder_feedforward_dim=1024, activation_function="relu", encoder_layers=6, decoder_layers=10, num_attention_heads=8, dropout=0.0, dim_feedforward=2048, pre_norm=False, enforce_input_projection=False, common_stride=4, ignore_value=255, num_queries=100, no_object_weight=0.1, class_weight=2.0, mask_weight=5.0, dice_weight=5.0, train_num_points=12544, oversample_ratio=3.0, importance_sample_ratio=0.75, init_std=0.02, init_xavier_std=1.0, use_auxiliary_loss=True, feature_strides=[4, 8, 16, 32], output_auxiliary_logits=None, **kwargs, ):
        """simple docstring"""
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
lowercase = CONFIG_MAPPING["""swin"""](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__lowerCAmelCase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(backbone_config, dict):
    backbone_model_type = backbone_config.pop("model_type")
    config_class = CONFIG_MAPPING[backbone_model_type]
    backbone_config = config_class.from_dict(backbone_config)
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
f'Supported model types: {",".join(self.backbones_supported )}' )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
super().__init__(**kwargs)
    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        """simple docstring"""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )
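    # to_dict serializes the nested backbone config so the composite config round-trips through JSON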
    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 32 | 1 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)
    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class SqlDatasetWriter:
    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs
    def write(self) -> int:
        # `sql` and `con` belong to the reader path; drop them before delegating to pandas `to_sql`
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written
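    # After the first batch the target table already exists, so every later batch must use
    # if_exists="append" instead of the caller's setting; only the first batch may create/replace it.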
    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)
    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format", ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql, [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format", ):
                    written += num_rows
        return written
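# Minimal usage sketch (the connection string "sqlite:///data.db" and table names are illustrative, not from the source):
#   ds = SqlDatasetReader("SELECT * FROM t", "sqlite:///data.db").read()
#   SqlDatasetWriter(ds, "t_copy", "sqlite:///data.db").write()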
| 5 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors
    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
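# Bidirectional BFS expands one frontier from the start and one from the goal in lockstep;
# when the two frontiers meet, the forward and backward partial paths are stitched together.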
class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            # retarget each search towards the other frontier's current node
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None
    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
lowerCAmelCase_ : int = (0, 0)
lowerCAmelCase_ : Any = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowerCAmelCase_ : List[Any] = time.time()
lowerCAmelCase_ : Dict = BreadthFirstSearch(init, goal)
lowerCAmelCase_ : Optional[int] = bfs.search()
lowerCAmelCase_ : str = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
lowerCAmelCase_ : Union[str, Any] = time.time()
lowerCAmelCase_ : List[str] = BidirectionalBreadthFirstSearch(init, goal)
lowerCAmelCase_ : str = bd_bfs.search()
lowerCAmelCase_ : Dict = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 170 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
def snake_case_ (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def snake_case_ (self ):
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def snake_case_ (self ):
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def snake_case_ (self ):
pass
def snake_case_ (self ):
_UpperCAmelCase , _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Dict = model_class(lowerCAmelCase__ )
_UpperCAmelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : str = [*signature.parameters.keys()]
_UpperCAmelCase : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def snake_case_ (self ):
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def snake_case_ (self ):
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : Any = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCAmelCase : Optional[Any] = outputs.hidden_states
_UpperCAmelCase : Tuple = 2_6
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
_UpperCAmelCase , _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Union[str, Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : Optional[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case_ (self ):
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def snake_case_ (self ):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : List[str] = MobileNetVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def snake_case_ (self ):
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def snake_case_ (self ):
_UpperCAmelCase : Optional[int] = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = self.default_image_processor
_UpperCAmelCase : List[Any] = prepare_img()
_UpperCAmelCase : Tuple = image_processor(images=lowerCAmelCase__ , return_tensors="""pt""" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : List[str] = model(**lowerCAmelCase__ )
# verify the logits
_UpperCAmelCase : Optional[int] = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
_UpperCAmelCase : str = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
| 170 | 1 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
lowercase : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowercase : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_rust_and_python_full_tokenizers(self):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 20 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
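# MAE checkpoints store query, key and value as one fused `qkv` matrix; the converter below splits it
# into the separate q/k/v projections that the HF ViT modules expect (target key names reconstructed
# from the upstream transformers conversion script).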
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1_280
        config.intermediate_size = 5_120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
    model = ViTMAEForPreTraining(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()
    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]])
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]])
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]])
    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 20 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class a ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")
        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)
    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def UpperCAmelCase ( self ) -> Tuple:
_A = self.get_tokenizer()
_A = self.get_feature_extractor()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
_A = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowerCAmelCase_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> str:
_A = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_A = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(lowerCAmelCase_ , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=lowerCAmelCase_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def UpperCAmelCase ( self ) -> Dict:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = floats_list((3, 10_00) )
_A = feature_extractor(lowerCAmelCase_ , return_tensors="""np""" )
_A = processor(lowerCAmelCase_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = """This is a test string"""
_A = processor(text=lowerCAmelCase_ )
_A = tokenizer(lowerCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
def UpperCAmelCase ( self ) -> str:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_A = processor.decode(lowerCAmelCase_ )
_A = decoder.decode_beams(lowerCAmelCase_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> str:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_A = processor.batch_decode(lowerCAmelCase_ )
else:
with get_context(lowerCAmelCase_ ).Pool() as pool:
_A = processor.batch_decode(lowerCAmelCase_ , lowerCAmelCase_ )
_A = list(lowerCAmelCase_ )
with get_context("""fork""" ).Pool() as p:
_A = decoder.decode_beams_batch(lowerCAmelCase_ , lowerCAmelCase_ )
_A , _A , _A = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(lowerCAmelCase_ , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(lowerCAmelCase_ , decoded_processor.logit_score )
self.assertListEqual(lowerCAmelCase_ , decoded_processor.lm_score )
def UpperCAmelCase ( self ) -> int:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = self._get_dummy_logits()
_A = 15
_A = -20.0
_A = -4.0
_A = processor.batch_decode(
lowerCAmelCase_ , beam_width=lowerCAmelCase_ , beam_prune_logp=lowerCAmelCase_ , token_min_logp=lowerCAmelCase_ , )
_A = decoded_processor_out.text
_A = list(lowerCAmelCase_ )
with get_context("""fork""" ).Pool() as pool:
_A = decoder.decode_beams_batch(
lowerCAmelCase_ , lowerCAmelCase_ , beam_width=lowerCAmelCase_ , beam_prune_logp=lowerCAmelCase_ , token_min_logp=lowerCAmelCase_ , )
_A = [d[0][0] for d in decoded_decoder_out]
_A = [d[0][2] for d in decoded_decoder_out]
_A = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , lowerCAmelCase_ )
self.assertTrue(np.array_equal(lowerCAmelCase_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , lowerCAmelCase_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(lowerCAmelCase_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , lowerCAmelCase_ , atol=1E-3 ) )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = self._get_dummy_logits()
_A = 2.0
_A = 5.0
_A = -20.0
_A = True
_A = processor.batch_decode(
lowerCAmelCase_ , alpha=lowerCAmelCase_ , beta=lowerCAmelCase_ , unk_score_offset=lowerCAmelCase_ , lm_score_boundary=lowerCAmelCase_ , )
_A = decoded_processor_out.text
_A = list(lowerCAmelCase_ )
decoder.reset_params(
alpha=lowerCAmelCase_ , beta=lowerCAmelCase_ , unk_score_offset=lowerCAmelCase_ , lm_score_boundary=lowerCAmelCase_ , )
with get_context("""fork""" ).Pool() as pool:
_A = decoder.decode_beams_batch(
lowerCAmelCase_ , lowerCAmelCase_ , )
_A = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , lowerCAmelCase_ )
_A = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[Any]:
_A = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_A = os.listdir(lowerCAmelCase_ )
_A = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
_A = snapshot_download("""hf-internal-testing/processor_with_lm""" )
_A = WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase_ )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_A = os.listdir(lowerCAmelCase_ )
_A = os.listdir(lowerCAmelCase_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Any:
_A = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_A = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_A = floats_list((3, 10_00) )
_A = processor_wavaveca(lowerCAmelCase_ , return_tensors="""np""" )
_A = processor_auto(lowerCAmelCase_ , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_A = self._get_dummy_logits()
_A = processor_wavaveca.batch_decode(lowerCAmelCase_ )
_A = processor_auto.batch_decode(lowerCAmelCase_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def UpperCAmelCase ( self ) -> Any:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def UpperCAmelCase ( self ) -> Dict:
_A = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_A = self._get_dummy_logits()[0]
_A = processor.decode(lowerCAmelCase_ , output_word_offsets=lowerCAmelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def UpperCAmelCase ( self ) -> List[Any]:
_A = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_A = self._get_dummy_logits()
_A = processor.batch_decode(lowerCAmelCase_ , output_word_offsets=lowerCAmelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(lowerCAmelCase_ , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCAmelCase ( self ) -> Dict:
import torch
_A = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=True )
_A = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
_A = iter(lowerCAmelCase_ )
_A = next(lowerCAmelCase_ )
_A = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
_A = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_A = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
_A = model(lowerCAmelCase_ ).logits.cpu().numpy()
_A = processor.decode(logits[0] , output_word_offsets=lowerCAmelCase_ )
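# each CTC frame spans `inputs_to_logits_ratio` input samples; dividing by the sampling rate
# converts the decoded frame offsets into seconds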
_A = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_A = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
_A = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(lowerCAmelCase_ , """word""" ) ) , lowerCAmelCase_ )
self.assertEqual(""" """.join(self.get_from_offsets(lowerCAmelCase_ , """word""" ) ) , output.text )
# output times
_A = torch.tensor(self.get_from_offsets(lowerCAmelCase_ , """start_time""" ) )
_A = torch.tensor(self.get_from_offsets(lowerCAmelCase_ , """end_time""" ) )
# fmt: off
_A = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_A = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=0.01 ) )
self.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=0.01 ) )
| 81 | def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1))
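# e.g. multiplication_table(number=5, number_of_terms=2) returns "5 * 1 = 5\n5 * 2 = 10"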
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 81 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
SCREAMING_SNAKE_CASE :Any = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCAmelCase ( a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ['pixel_values']
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs, ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
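    # resize scales the image so its shorter side matches size["shortest_edge"], preserving aspect ratio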
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : int = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name="size" , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
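# Minimal usage sketch (hedged: assumes the de-obfuscated class above; the sample image is illustrative):
# processor = CLIPImageProcessor(size={"shortest_edge": 2_2_4})
# sample = PIL.Image.new("RGB", (640, 480))
# batch = processor(images=sample, return_tensors="np")   # __call__ dispatches to preprocess
# batch["pixel_values"].shape                              # expected (1, 3, 224, 224) after resize + center crop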
| 159 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m" , "--pretrained_model_name_or_path" , type=str , default=None , required=True , help="Path to pretrained model or model identifier from huggingface.co/models." , )
    parser.add_argument(
        "-c" , "--caption" , type=str , default="robotic cat with wings" , help="Text used to generate images." , )
    parser.add_argument(
        "-n" , "--images_num" , type=int , default=4 , help="How much images to generate." , )
    parser.add_argument(
        "-s" , "--seed" , type=int , default=42 , help="Seed for random process." , )
    parser.add_argument(
        "-ci" , "--cuda_id" , type=int , default=0 , help="cuda_id." , )
    args = parser.parse_args()
    return args
def image_grid( imgs , rows , cols ):
    '''simple docstring'''
    if not len(imgs ) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct." )
    w , h = imgs[0].size
    grid = Image.new("RGB" , size=(cols * w, rows * h) )
    grid_w , grid_h = grid.size
    for i, img in enumerate(imgs ):
        grid.paste(img , box=(i % cols * w, i // cols * h) )
    return grid
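# e.g. image_grid(images, rows=2, cols=2) tiles four equally sized frames into one 2x2 contact sheet.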
def generate_images( pipeline , prompt="robotic cat with wings" , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ):
    '''simple docstring'''
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows )
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, '''unet''', unet)
else:
    unet = unet.to(torch.device('''cuda''', args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
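# Example invocation (hedged: hypothetical script name and paths; flags mirror the argparse options above):
#   python text2images.py -m ./sd_finetuned_int8 -c "robotic cat with wings" -n 4 -s 42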
| 159 | 1 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester :
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_token_type_ids=True , use_input_mask=True , use_labels=True , use_mc_token_ids=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def get_config( self ):
        return CTRLConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def create_and_check_ctrl_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        model = CTRLModel(config=config )
        model.to(torch_device )
        model.eval()
        model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
    def create_and_check_lm_head_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        model = CTRLLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
        return config, inputs_dict
    def create_and_check_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class CTRLModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False
    def setUp( self ):
        self.model_tester = CTRLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CTRLConfig , n_embd=37 )
    def tearDown( self ):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_ctrl_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs )
    def test_ctrl_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_common_attributes( self ):
        pass
    @slow
    def test_model_from_pretrained( self ):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility( self ):
        pass
@require_torch
class __A ( unittest.TestCase ):
    def tearDown( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
    def test_lm_generate_ctrl( self ):
        model = CTRLLMHeadModel.from_pretrained('ctrl' )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]] , dtype=torch.long , device=torch_device ) # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 323 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __A ( unittest.TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
            'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , CLIPImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , CLIPImageProcessor )
    def test_save_load_pretrained_additional_features( self ):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , CLIPImageProcessor )
    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors='np' )
        input_processor = processor(images=image_input , return_tensors='np' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 323 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase : Optional[Any] = logging.get_logger(__name__)
lowercase : Optional[int] = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class CamembertConfig( PretrainedConfig ):
    model_type = "camembert"
    def __init__( self ,vocab_size=30522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,position_embedding_type="absolute" ,use_cache=True ,classifier_dropout=None ,**kwargs ,):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
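# Usage sketch (hedged: assumes the class names restored above; the constructor kwargs are illustrative):
# config = CamembertConfig(vocab_size=30522, num_hidden_layers=6)
# onnx_config = CamembertOnnxConfig(config, task="default")
# onnx_config.inputs  # OrderedDict mapping input_ids / attention_mask to their dynamic axes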
| 20 | import csv
import tweepy
# Twitter API credentials
consumer_key = """"""
consumer_secret = """"""
access_key = """"""
access_secret = """"""
def get_all_tweets( screen_name ):
    """simple docstring"""
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=200 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(F"getting tweets before {oldest}" )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=200 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(F"...{len(alltweets )} tweets downloaded so far" )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F"new_{screen_name}_tweets.csv" , '''w''' ) as f:
        writer = csv.writer(f )
        writer.writerow(['''id''', '''created_at''', '''text'''] )
        writer.writerows(outtweets )
if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("""FirePing32""") | 210 | 0 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel( ModelMixin ):
    def __init__( self , controlnets : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
        """simple docstring"""
        super().__init__()
        self.nets = nn.ModuleList(controlnets )
    def forward( self , sample : torch.FloatTensor , timestep : Union[torch.Tensor, float, int] , encoder_hidden_states : torch.Tensor , controlnet_cond : List[torch.tensor] , conditioning_scale : List[float] , class_labels : Optional[torch.Tensor] = None , timestep_cond : Optional[torch.Tensor] = None , attention_mask : Optional[torch.Tensor] = None , cross_attention_kwargs : Optional[Dict[str, Any]] = None , guess_mode : bool = False , return_dict : bool = True , ):
        """simple docstring"""
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond , conditioning_scale , self.nets ) ):
            down_samples , mid_sample = controlnet(
                sample , timestep , encoder_hidden_states , image , scale , class_labels , timestep_cond , attention_mask , cross_attention_kwargs , guess_mode , return_dict , )
            # merge samples
            if i == 0:
                down_block_res_samples , mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample
    def save_pretrained( self , save_directory : Union[str, os.PathLike] , is_main_process : bool = True , save_function : Callable = None , safe_serialization : bool = False , variant : Optional[str] = None , ):
        """simple docstring"""
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )
            idx += 1
            model_path_to_save = model_path_to_save + f"""_{idx}"""
@classmethod
    def from_pretrained( cls , pretrained_model_path : Optional[Union[str, os.PathLike]] , **kwargs ):
        """simple docstring"""
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
            controlnets.append(controlnet )
            idx += 1
            model_path_to_load = pretrained_model_path + f"""_{idx}"""
        logger.info(f"""{len(controlnets )} controlnets loaded from {pretrained_model_path}.""" )
        if len(controlnets ) == 0:
            raise ValueError(
                f"""No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + '_0'}.""" )
        return cls(controlnets )
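# Usage sketch (hedged: hypothetical checkpoint directories):
# nets = [ControlNetModel.from_pretrained(p) for p in ("./cn_canny", "./cn_depth")]
# multi = MultiControlNetModel(nets)
# multi.save_pretrained("./multi_cn")   # writes ./multi_cn, ./multi_cn_1, ...
# restored = MultiControlNetModel.from_pretrained("./multi_cn")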
| 353 |
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch( datasets.Metric ):
    def _info( self ):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , reference_urls=[] , )
    def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        """simple docstring"""
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , '''''' , x ) for x in predictions] )
                references = np.array([re.sub(s , '''''' , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans('''''' , '''''' , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 238 | 0 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {
            'num_train_timesteps': 1_000,
            'variance_type': 'fixed_small_log',
            'clip_sample': True,
            'clip_sample_range': 1.0,
            'prediction_type': 'epsilon',
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        '''simple docstring'''
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_variance_type( self ):
        '''simple docstring'''
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample( self ):
        '''simple docstring'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_clip_sample_range( self ):
        '''simple docstring'''
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range )
    def test_prediction_type( self ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ):
        '''simple docstring'''
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
    def test_variance_fixed_small_log( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='fixed_small_log' )
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00E-10 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1E-5
    def test_variance_learned_range( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='learned_range' )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.1_712_790 < 1E-5
        assert scheduler._get_variance(487 , predicted_variance=predicted_variance ) - -5.7_998_052 < 1E-5
        assert scheduler._get_variance(999 , predicted_variance=predicted_variance ) - -0.0_010_011 < 1E-5
    def test_full_loop( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2
        assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3
    def test_full_loop_skip_timesteps( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(25 )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2
        assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3
def lowercase_ ( self :Any ) -> Dict:
'''simple docstring'''
pass
def lowercase_ ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
pass
| 161 | """simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _lowerCamelCase( a , a , a , a , a ):
for attribute in key.split("." ):
__a = getattr(a , a )
if weight_type is not None:
__a = getattr(a , a ).shape
else:
__a = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__a = value
elif weight_type == "weight_g":
__a = value
elif weight_type == "weight_v":
__a = value
elif weight_type == "bias":
__a = value
else:
__a = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _lowerCamelCase( a , a ):
__a = []
__a = fairseq_model.state_dict()
__a = hf_model.feature_extractor
__a = hf_model.adapter
for name, value in fairseq_dict.items():
__a = False
if "conv_layers" in name:
load_conv_layer(
a , a , a , a , hf_model.config.feat_extract_norm == "group" , )
__a = True
elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ):
load_adapter(a , a , a , a )
__a = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
__a = True
if "*" in mapped_key:
__a = name.split(a )[0].split("." )[-2]
__a = mapped_key.replace("*" , a )
if "weight_g" in name:
__a = "weight_g"
elif "weight_v" in name:
__a = "weight_v"
elif "bias" in name:
__a = "bias"
elif "weight" in name:
__a = "weight"
else:
__a = None
set_recursively(a , a , a , a , a )
continue
if not is_used:
unused_weights.append(a )
logger.warning(F"Unused weights: {unused_weights}" )
def _lowerCamelCase( a , a , a , a , a ):
__a = full_name.split("conv_layers." )[-1]
__a = name.split("." )
__a = int(items[0] )
__a = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
__a = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
__a = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
__a = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
__a = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(a )
def _lowerCamelCase( a , a , a , a ):
__a = full_name.split("adaptor." )[-1]
__a = name.split("." )
if items[1].isdigit():
__a = int(items[1] )
else:
__a = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
__a = value
logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
__a = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
__a = value
logger.info(F"Adapter proj layer bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
__a = value
logger.info(F"Adapter proj layer weight was initialized from {full_name}." )
elif isinstance(a , a ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
__a = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
__a = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
else:
unused_weights.append(a )
def make_linear_from_emb( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def _lowerCamelCase( a , a , a , a , a , a , a , a , a , a , a , ):
    __a = Wav2Vec2Config.from_pretrained(
a , add_adapter=a , adapter_stride=a , adapter_kernel_size=a , use_auth_token=a , output_hidden_size=a , )
__a = MBartConfig.from_pretrained(a )
# load model
__a , __a , __a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"config_yaml": config_yaml_path,
"data": "/".join(dict_path.split("/" )[:-1] ),
"w2v_path": checkpoint_path,
"load_pretrained_decoder_from": None,
} , )
__a = model[0].eval()
# load feature extractor
    __a = Wav2Vec2FeatureExtractor.from_pretrained(a , use_auth_token=a )
# set weights for wav2vec2 encoder
    __a = Wav2Vec2Model(a )
recursively_load_weights_wavaveca(model.encoder , a )
# load decoder weights
__a = MBartForCausalLM(a )
__a , __a = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=a )
logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
__a = SpeechEncoderDecoderModel(encoder=a , decoder=a )
__a = False
    __a = MBart50Tokenizer(a )
tokenizer.save_pretrained(a )
__a = hf_wavavec.config.to_dict()
__a = tokenizer.pad_token_id
__a = tokenizer.bos_token_id
__a = tokenizer.eos_token_id
__a = "mbart50"
__a = "wav2vec2"
__a = tokenizer.eos_token_id
__a = 2_5_0_0_0_4
__a = tokenizer.eos_token_id
__a = SpeechEncoderDecoderConfig.from_dict(a )
hf_wavavec.save_pretrained(a )
feature_extractor.save_pretrained(a )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=250004, type=int, help="""`decoder_start_token_id` of model config""")
SCREAMING_SNAKE_CASE__:List[Any] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
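# Example invocation (hedged: hypothetical local paths; flags mirror the argparse definitions above):
#   python convert_wav2vec2_mbart.py --checkpoint_path ./w2v2_mbart.pt --dict_path ./dict.txt \
#       --config_yaml_path ./config.yaml --pytorch_dump_folder_path ./hf_export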
| 261 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector :
'''simple docstring'''
    def __init__( self , components : Collection[float] | None = None ) -> None:
        '''simple docstring'''
        if components is None:
            components = []
        self.__components = list(components )
def __len__( self : Union[str, Any] ) -> int:
'''simple docstring'''
return len(self.__components )
def __str__( self : Optional[Any] ) -> str:
'''simple docstring'''
return "(" + ",".join(map(lowerCAmelCase_ , self.__components ) ) + ")"
    def __add__( self , other : Vector ) -> Vector:
        '''simple docstring'''
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] + other.component(i ) for i in range(size )]
            return Vector(result )
        else:
            raise Exception("""must have the same size""" )
    def __sub__( self , other : Vector ) -> Vector:
        '''simple docstring'''
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] - other.component(i ) for i in range(size )]
            return Vector(result )
        else:  # error case
            raise Exception("""must have the same size""" )
    @overload
    def __mul__( self , other : float ) -> Vector:
        '''simple docstring'''
        ...
    @overload
    def __mul__( self , other : Vector ) -> float:
        '''simple docstring'''
        ...
    def __mul__( self , other : float | Vector ) -> float | Vector:
        '''simple docstring'''
        if isinstance(other , (float, int) ):
            ans = [c * other for c in self.__components]
            return Vector(ans )
        elif isinstance(other , Vector ) and len(self ) == len(other ):
            size = len(self )
            prods = [self.__components[i] * other.component(i ) for i in range(size )]
            return sum(prods )
        else:  # error case
            raise Exception("""invalid operand!""" )
    def copy( self ) -> Vector:
        '''simple docstring'''
        return Vector(self.__components )
    def component( self , i : int ) -> float:
        '''simple docstring'''
        if isinstance(i , int ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
        else:
            raise Exception("""index out of range""" )
    def change_component( self , pos : int , value : float ) -> None:
        '''simple docstring'''
        assert -len(self.__components ) <= pos < len(self.__components )
        self.__components[pos] = value
    def euclidean_length( self ) -> float:
        '''simple docstring'''
        if len(self.__components ) == 0:
            raise Exception("""Vector is empty""" )
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares ) )
    def angle( self , other : Vector , deg : bool = False ) -> float:
        '''simple docstring'''
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den ) )
        else:
            return math.acos(num / den )
def zero_vector( dimension : int ) -> Vector:
    """simple docstring"""
    assert isinstance(dimension, int )
    return Vector([0] * dimension )
def unit_basis_vector( dimension : int, pos : int ) -> Vector:
    """simple docstring"""
    assert isinstance(dimension, int ) and (isinstance(pos, int ))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )
def axpy( scalar : float, x : Vector, y : Vector ) -> Vector:
    """simple docstring"""
    assert (
        isinstance(x, Vector )
        and isinstance(y, Vector )
        and (isinstance(scalar, (int, float) ))
    )
    return x * scalar + y
def random_vector( n : int, a : int, b : int ) -> Vector:
    """simple docstring"""
    random.seed(None )
    ans = [random.randint(a, b ) for _ in range(n )]
    return Vector(ans )
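# Worked example (deterministic, using the Vector class above):
# v = Vector([1.0, 2.0, 2.0])
# v.euclidean_length()          # 3.0, since sqrt(1 + 4 + 4) = 3
# v * Vector([2.0, 0.0, 1.0])   # 4.0 = 1*2 + 2*0 + 2*1 (dot product)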
class Matrix :
'''simple docstring'''
    def __init__( self , matrix : list[list[float]] , w : int , h : int ) -> None:
        '''simple docstring'''
        self.__matrix = matrix
        self.__width = w
        self.__height = h
def __str__( self : Tuple ) -> str:
'''simple docstring'''
        ans = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
    def __add__( self , other : Matrix ) -> Matrix:
        '''simple docstring'''
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] + other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception("""matrix must have the same dimension!""" )
    def __sub__( self , other : Matrix ) -> Matrix:
        '''simple docstring'''
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] - other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self : str , lowerCAmelCase_ : float ) -> Matrix:
'''simple docstring'''
...
@overload
def __mul__( self : Tuple , lowerCAmelCase_ : Vector ) -> Vector:
'''simple docstring'''
...
def __mul__( self : List[Any] , lowerCAmelCase_ : float | Vector ) -> Vector | Matrix:
'''simple docstring'''
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): # matrix-vector
if len(lowerCAmelCase_ ) == self.__width:
A__ : int =zero_vector(self.__height )
for i in range(self.__height ):
A__ : str =[
self.__matrix[i][j] * other.component(lowerCAmelCase_ )
for j in range(self.__width )
]
ans.change_component(lowerCAmelCase_ , sum(lowerCAmelCase_ ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(lowerCAmelCase_ , (int, float) ): # matrix-scalar
A__ : str =[
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(lowerCAmelCase_ , self.__width , self.__height )
raise Exception("""invalid operand!""" )
def lowercase__ ( self : int ) -> int:
'''simple docstring'''
return self.__height
def lowercase__ ( self : List[str] ) -> int:
'''simple docstring'''
return self.__width
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> float:
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : float ) -> None:
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
A__ : Optional[Any] =value
else:
raise Exception("""change_component: indices out of bounds""" )
def lowercase__ ( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> float:
'''Returns the x,y minor: the determinant of the submatrix with row x and column y removed.'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
A__ : Union[str, Any] =self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(lowerCAmelCase_ ) ):
A__ : Dict =minor[i][:y] + minor[i][y + 1 :]
return Matrix(lowerCAmelCase_ , self.__width - 1 , self.__height - 1 ).determinant()
def lowercase__ ( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> float:
'''Returns the x,y cofactor: (-1) ** (x + y) times the x,y minor.'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(lowerCAmelCase_ , lowerCAmelCase_ )
else:
raise Exception("""Indices out of bounds""" )
def lowercase__ ( self : Dict ) -> float:
'''Returns the determinant of the square matrix via cofactor expansion along the first row.'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
A__ : str =[
self.__matrix[0][y] * self.cofactor(0 , lowerCAmelCase_ ) for y in range(self.__width )
]
return sum(lowerCAmelCase_ )
def __lowerCamelCase ( __snake_case : int ) -> Matrix:
"""simple docstring"""
A__ : list[list[float]] =[[0] * n for _ in range(__snake_case )]
return Matrix(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : int, __snake_case : int ) -> Matrix:
"""simple docstring"""
random.seed(__snake_case )
A__ : list[list[float]] =[
[random.randint(__snake_case, __snake_case ) for _ in range(__snake_case )] for _ in range(__snake_case )
]
return Matrix(__snake_case, __snake_case, __snake_case )
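# ---- Hedged sketch (not part of the original file) ----
# A standalone Laplace (cofactor) expansion mirroring the determinant method
# in the Matrix class above, written against plain nested lists; the name
# `det` is illustrative only.
def det(m: list[list[float]]) -> float:
    if len(m) == 1:
        return m[0][0]
    total = 0.0
    for col in range(len(m)):
        minor = [row[:col] + row[col + 1 :] for row in m[1:]]  # drop row 0 and column col
        total += (-1) ** col * m[0][col] * det(minor)
    return total
# Example: det([[1, 2], [3, 4]]) == -2.0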
| 136 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
A__ : int =tempfile.mkdtemp()
# fmt: off
A__ : Optional[int] =["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
A__ : List[Any] =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
A__ : Tuple =["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
A__ : int ={"""unk_token""": """<unk>"""}
A__ : Optional[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A__ : int =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase_ ) )
A__ : Dict ={
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
A__ : Dict =os.path.join(self.tmpdirname , lowerCAmelCase_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] , **lowerCAmelCase_ : Union[str, Any] ) -> int:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] , **lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] , **lowerCAmelCase_ : str ) -> Union[str, Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowercase__ ( self : Dict ) -> str:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
A__ : Dict =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
A__ : int =[Image.fromarray(np.moveaxis(lowerCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
A__ : int =self.get_tokenizer()
A__ : Optional[int] =self.get_rust_tokenizer()
A__ : Any =self.get_image_processor()
A__ : int =CLIPSegProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
A__ : Dict =CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase_ )
A__ : int =CLIPSegProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
A__ : Optional[int] =CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase_ )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
A__ : List[str] =CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ : List[Any] =self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A__ : Union[str, Any] =self.get_image_processor(do_normalize=lowerCAmelCase_ , padding_value=1.0 )
A__ : Optional[int] =CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase_ )
def lowercase__ ( self : str ) -> str:
'''simple docstring'''
A__ : Optional[Any] =self.get_image_processor()
A__ : int =self.get_tokenizer()
A__ : Optional[int] =CLIPSegProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
A__ : Dict =self.prepare_image_inputs()
A__ : List[Any] =image_processor(lowerCAmelCase_ , return_tensors="""np""" )
A__ : List[Any] =processor(images=lowerCAmelCase_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
A__ : Any =self.get_image_processor()
A__ : Optional[Any] =self.get_tokenizer()
A__ : int =CLIPSegProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
A__ : Any ="""lower newer"""
A__ : Optional[int] =processor(text=lowerCAmelCase_ )
A__ : Optional[int] =tokenizer(lowerCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
A__ : Any =self.get_image_processor()
A__ : Optional[Any] =self.get_tokenizer()
A__ : Union[str, Any] =CLIPSegProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
A__ : Optional[Any] ="""lower newer"""
A__ : List[str] =self.prepare_image_inputs()
A__ : Optional[Any] =processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase_ ):
processor()
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
A__ : Optional[Any] =self.get_image_processor()
A__ : List[str] =self.get_tokenizer()
A__ : Optional[int] =CLIPSegProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
A__ : Tuple =self.prepare_image_inputs()
A__ : str =self.prepare_image_inputs()
A__ : int =processor(images=lowerCAmelCase_ , visual_prompt=lowerCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """conditional_pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase_ ):
processor()
def lowercase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
A__ : List[str] =self.get_image_processor()
A__ : Optional[int] =self.get_tokenizer()
A__ : Any =CLIPSegProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
A__ : List[Any] =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A__ : Tuple =processor.batch_decode(lowerCAmelCase_ )
A__ : List[Any] =tokenizer.batch_decode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
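# ---- Hedged usage sketch (not part of the original file) ----
# The save/load round-trip the tests above exercise, in plain form (the temp
# path is illustrative):
#   processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   processor.save_pretrained("/tmp/clipseg_processor")
#   reloaded = CLIPSegProcessor.from_pretrained("/tmp/clipseg_processor")
#   inputs = reloaded(text=["lower newer"], images=images, return_tensors="np")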
| 136 | 1 |
"""simple docstring"""
class snake_case :
"""simple docstring"""
def __init__( self : int ,lowerCamelCase__ : int ,lowerCamelCase__ : Union[str, Any]=None ,lowerCamelCase__ : List[str]=None ):
UpperCAmelCase__ = data
UpperCAmelCase__ = previous
UpperCAmelCase__ = next_node
def __str__( self : List[Any] ):
return f'''{self.data}'''
def __lowerCAmelCase ( self : str ):
return self.data
def __lowerCAmelCase ( self : Tuple ):
return self.next
def __lowerCAmelCase ( self : List[Any] ):
return self.previous
class snake_case :
"""simple docstring"""
def __init__( self : Any ,lowerCamelCase__ : List[Any] ):
UpperCAmelCase__ = head
def __iter__( self : Optional[int] ):
return self
def __lowerCAmelCase ( self : List[Any] ):
if not self.current:
raise StopIteration
else:
UpperCAmelCase__ = self.current.get_data()
UpperCAmelCase__ = self.current.get_next()
return value
class snake_case :
"""simple docstring"""
def __init__( self : Dict ):
UpperCAmelCase__ = None # First node in list
UpperCAmelCase__ = None # Last node in list
def __str__( self : Tuple ):
UpperCAmelCase__ = self.head
UpperCAmelCase__ = []
while current is not None:
nodes.append(current.get_data() )
UpperCAmelCase__ = current.get_next()
return " ".join(str(lowerCamelCase__ ) for node in nodes )
def __contains__( self : Optional[int] ,lowerCamelCase__ : int ):
UpperCAmelCase__ = self.head
while current:
if current.get_data() == value:
return True
UpperCAmelCase__ = current.get_next()
return False
def __iter__( self : str ):
return LinkedListIterator(self.head )
def __lowerCAmelCase ( self : str ):
if self.head:
return self.head.get_data()
return None
def __lowerCAmelCase ( self : Optional[Any] ):
if self.tail:
return self.tail.get_data()
return None
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Node ):
if self.head is None:
UpperCAmelCase__ = node
UpperCAmelCase__ = node
else:
self.insert_before_node(self.head ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Node ):
if self.head is None:
self.set_head(lowerCamelCase__ )
else:
self.insert_after_node(self.tail ,lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : int ):
UpperCAmelCase__ = Node(lowerCamelCase__ )
if self.head is None:
self.set_head(lowerCamelCase__ )
else:
self.set_tail(lowerCamelCase__ )
def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : Node ,lowerCamelCase__ : Node ):
UpperCAmelCase__ = node
UpperCAmelCase__ = node.previous
if node.get_previous() is None:
UpperCAmelCase__ = node_to_insert
else:
UpperCAmelCase__ = node_to_insert
UpperCAmelCase__ = node_to_insert
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Node ,lowerCamelCase__ : Node ):
UpperCAmelCase__ = node
UpperCAmelCase__ = node.next
if node.get_next() is None:
UpperCAmelCase__ = node_to_insert
else:
UpperCAmelCase__ = node_to_insert
UpperCAmelCase__ = node_to_insert
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ):
UpperCAmelCase__ = 1
UpperCAmelCase__ = Node(lowerCamelCase__ )
UpperCAmelCase__ = self.head
while node:
if current_position == position:
self.insert_before_node(lowerCamelCase__ ,lowerCamelCase__ )
return
current_position += 1
UpperCAmelCase__ = node.next
self.insert_after_node(self.tail ,lowerCamelCase__ )
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : int ):
UpperCAmelCase__ = self.head
while node:
if node.get_data() == item:
return node
UpperCAmelCase__ = node.get_next()
raise Exception('Node not found' )
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : Any ):
if (node := self.get_node(lowerCamelCase__ )) is not None:
if node == self.head:
UpperCAmelCase__ = self.head.get_next()
if node == self.tail:
UpperCAmelCase__ = self.tail.get_previous()
self.remove_node_pointers(lowerCamelCase__ )
@staticmethod
def __lowerCAmelCase ( lowerCamelCase__ : Node ):
if node.get_next():
UpperCAmelCase__ = node.previous
if node.get_previous():
UpperCAmelCase__ = node.next
UpperCAmelCase__ = None
UpperCAmelCase__ = None
def __lowerCAmelCase ( self : Union[str, Any] ):
return self.head is None
def a_ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
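# ---- Hedged sketch (not part of the original file) ----
# The core pointer rewiring behind insert_before_node above, on a minimal
# two-field node; `_Node` and `insert_before` are illustrative names only.
class _Node:
    def __init__(self, data):
        self.data = data
        self.prev = None
        self.next = None

def insert_before(node: "_Node", new: "_Node") -> None:
    new.next = node
    new.prev = node.prev
    if node.prev is not None:
        node.prev.next = new
    node.prev = new

a, b = _Node(1), _Node(3)
a.next, b.prev = b, a
insert_before(b, _Node(2))
assert a.next.data == 2 and a.next.next is b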
| 98 |
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowerCAmelCase ( unittest.TestCase ):
__UpperCAmelCase : Union[str, Any] = JukeboxTokenizer
__UpperCAmelCase : Union[str, Any] = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
import torch
snake_case : Any = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
snake_case : Optional[Any] = tokenizer(**self.metas )["input_ids"]
# fmt: off
snake_case : Optional[int] = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
import torch
snake_case : Tuple = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
snake_case : Optional[Any] = tokenizer(**self.metas )["input_ids"]
# fmt: off
snake_case : List[Any] = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
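# ---- Hedged usage sketch (not part of the original file) ----
# The call pattern exercised by both tests above, outside the test harness:
#   tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
#   tokens = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="...")["input_ids"]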
| 203 | 0 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowercase__ = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowercase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowercase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
lowercase__ = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'config.{attribute}' in modeling_source
or f'getattr(config, "{attribute}"' in modeling_source
or f'getattr(self.config, "{attribute}"' in modeling_source
):
lowercase = True
# Deal with multi-line cases
elif (
re.search(
Rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"' , lowerCAmelCase__ , )
is not None
):
lowercase = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
lowercase = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
lowercase = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
lowercase = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
lowercase = True
if not attribute_used:
lowercase = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
lowercase = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
lowercase = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
lowercase = True
elif attribute.endswith('''_token_id''' ):
lowercase = True
# configuration class specific cases
if not case_allowed:
lowercase = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
lowercase = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = dict(inspect.signature(config_class.__init__ ).parameters )
lowercase = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
lowercase = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
lowercase = {}
if len(config_class.attribute_map ) > 0:
lowercase = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
lowercase = inspect.getsourcefile(lowerCAmelCase__ )
lowercase = os.path.dirname(lowerCAmelCase__ )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
lowercase = [os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) for fn in os.listdir(lowerCAmelCase__ ) if fn.startswith('''modeling_''' )]
# Get the source code strings
lowercase = []
for path in modeling_paths:
if os.path.isfile(lowerCAmelCase__ ):
with open(lowerCAmelCase__ ) as fp:
modeling_sources.append(fp.read() )
lowercase = []
for config_param, default_value in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
# `attributes` here is all the variant names for `config_param`
lowercase = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
unused_attributes.append(attributes[0] )
return sorted(lowerCAmelCase__ )
def UpperCamelCase ( ):
'''simple docstring'''
lowercase = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
lowercase = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda lowerCAmelCase__ : inspect.isclass(lowerCAmelCase__ )
and issubclass(lowerCAmelCase__ , lowerCAmelCase__ )
and inspect.getmodule(lowerCAmelCase__ ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
lowercase = check_config_attributes_being_used(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
lowercase = unused_attributes
if len(lowerCAmelCase__ ) > 0:
lowercase = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += f'{name}: {attributes}\n'
raise ValueError(lowerCAmelCase__ )
if __name__ == "__main__":
check_config_attributes()
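# ---- Hedged sketch (not part of the original file) ----
# What the multi-line `getattr(config, "attr"` pattern above matches; the
# source snippet and attribute name are illustrative only.
import re

_src = 'hidden = getattr(\n    self.config, "hidden_size", 768)'
_pat = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"'
assert re.search(_pat, _src) is not None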
| 359 |
from pathlib import Path
import fire
from tqdm import tqdm
def UpperCamelCase ( lowerCAmelCase__="ro" , lowerCAmelCase__="en" , lowerCAmelCase__="wmt16" , lowerCAmelCase__=None ):
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
lowercase = f'{src_lang}-{tgt_lang}'
print(f'Converting {dataset}-{pair}' )
lowercase = datasets.load_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
if save_dir is None:
lowercase = f'{dataset}-{pair}'
lowercase = Path(lowerCAmelCase__ )
save_dir.mkdir(exist_ok=lowerCAmelCase__ )
for split in ds.keys():
print(f'Splitting {split} with {ds[split].num_rows} records' )
# to save to val.source, val.target like summary datasets
lowercase = '''val''' if split == '''validation''' else split
lowercase = save_dir.joinpath(f'{fn}.source' )
lowercase = save_dir.joinpath(f'{fn}.target' )
lowercase = src_path.open('''w+''' )
lowercase = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
lowercase = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f'Saved {dataset} dataset to {save_dir}' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
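# ---- Hedged usage sketch (not part of the original file) ----
# fire exposes the function's keyword arguments as CLI flags; the script name
# and flag names (pre-obfuscation parameter names) are assumptions here:
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en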
| 97 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__A = logging.get_logger(__name__)
# TODO: upload to AWS
__A = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class UpperCAmelCase (UpperCAmelCase__ ):
"""simple docstring"""
_UpperCAmelCase :List[Any] = """retribert"""
def __init__( self , _UpperCAmelCase=30522 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=True , _UpperCAmelCase=128 , _UpperCAmelCase=0 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowercase__: Optional[int] = vocab_size
lowercase__: Optional[Any] = hidden_size
lowercase__: List[Any] = num_hidden_layers
lowercase__: Optional[int] = num_attention_heads
lowercase__: Any = hidden_act
lowercase__: str = intermediate_size
lowercase__: List[str] = hidden_dropout_prob
lowercase__: Dict = attention_probs_dropout_prob
lowercase__: Optional[int] = max_position_embeddings
lowercase__: List[Any] = type_vocab_size
lowercase__: List[Any] = initializer_range
lowercase__: Dict = layer_norm_eps
lowercase__: Tuple = share_encoders
lowercase__: List[Any] = projection_dim
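# ---- Hedged usage sketch (not part of the original file) ----
# Instantiating the config above; the class name `RetriBertConfig` and the
# keyword names (pre-obfuscation parameter names) are assumptions here:
#   config = RetriBertConfig(hidden_size=768, num_hidden_layers=8, projection_dim=128)
#   config.model_type  # -> "retribert"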
| 177 | '''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def a ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> Tuple:
raise NotImplementedError()
@abstractmethod
def a ( self : int ) -> Union[str, Any]:
raise NotImplementedError()
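# ---- Hedged sketch (not part of the original file) ----
# A concrete subclass of the abstract command pattern above, restated with
# illustrative names (BaseCommand / register_subcommand / run are assumptions
# mirroring the usual transformers CLI layout):
from abc import ABC, abstractmethod
from argparse import ArgumentParser

class BaseCommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()

class EchoCommand(BaseCommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        sub = parser.add_subparsers().add_parser("echo")
        sub.add_argument("text")
        sub.set_defaults(func=lambda args: EchoCommand(args.text).run())

    def __init__(self, text: str):
        self.text = text

    def run(self):
        print(self.text)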
| 229 | 0 |
from manim import *
class __A( __lowerCamelCase ):
"""simple docstring"""
def UpperCAmelCase_ (self ):
UpperCamelCase__ = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase__ = Rectangle(height=0.25 , width=0.25 )
UpperCamelCase__ = [mem.copy() for i in range(6 )]
UpperCamelCase__ = [mem.copy() for i in range(6 )]
UpperCamelCase__ = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
UpperCamelCase__ = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
UpperCamelCase__ = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
UpperCamelCase__ = Text("""CPU""" , font_size=24 )
UpperCamelCase__ = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = [mem.copy() for i in range(4 )]
UpperCamelCase__ = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
UpperCamelCase__ = Text("""GPU""" , font_size=24 )
UpperCamelCase__ = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
gpu.move_to([-1, -1, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = [mem.copy() for i in range(6 )]
UpperCamelCase__ = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
UpperCamelCase__ = Text("""Model""" , font_size=24 )
UpperCamelCase__ = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
model.move_to([3, -1.0, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = []
UpperCamelCase__ = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = fill.copy().set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.8 )
target.move_to(SCREAMING_SNAKE_CASE_ )
model_arr.append(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(SCREAMING_SNAKE_CASE_ )
self.add(*SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = [meta_mem.copy() for i in range(6 )]
UpperCamelCase__ = [meta_mem.copy() for i in range(6 )]
UpperCamelCase__ = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
UpperCamelCase__ = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
UpperCamelCase__ = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
UpperCamelCase__ = Text("""Disk""" , font_size=24 )
UpperCamelCase__ = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
disk.move_to([-4, -1.25, 0] )
self.add(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase__ = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(SCREAMING_SNAKE_CASE_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = MarkupText(
F"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ = Square(0.3 )
input.set_fill(SCREAMING_SNAKE_CASE_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , SCREAMING_SNAKE_CASE_ , buff=0.5 )
self.play(Write(SCREAMING_SNAKE_CASE_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=SCREAMING_SNAKE_CASE_ , buff=0.02 )
self.play(MoveToTarget(SCREAMING_SNAKE_CASE_ ) )
self.play(FadeOut(SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ = Arrow(start=SCREAMING_SNAKE_CASE_ , end=SCREAMING_SNAKE_CASE_ , color=SCREAMING_SNAKE_CASE_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , SCREAMING_SNAKE_CASE_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
UpperCamelCase__ = MarkupText(
F"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=3 ) )
UpperCamelCase__ = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
self.play(
Write(SCREAMING_SNAKE_CASE_ ) , Circumscribe(model_arr[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(model_cpu_arr[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(gpu_rect[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
UpperCamelCase__ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , SCREAMING_SNAKE_CASE_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
UpperCamelCase__ = AnimationGroup(
FadeOut(SCREAMING_SNAKE_CASE_ , run_time=0.5 ) , MoveToTarget(SCREAMING_SNAKE_CASE_ , run_time=0.5 ) , FadeIn(SCREAMING_SNAKE_CASE_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(SCREAMING_SNAKE_CASE_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
UpperCamelCase__ = 0.7
self.play(
Circumscribe(model_arr[i] , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(cpu_left_col_base[i] , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(gpu_rect[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(model_arr[i + 1] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(cpu_left_col_base[-1] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(gpu_rect[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
UpperCamelCase__ = a_c
UpperCamelCase__ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(SCREAMING_SNAKE_CASE_ ) , FadeOut(SCREAMING_SNAKE_CASE_ , run_time=0.5 ) , )
UpperCamelCase__ = MarkupText(F"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=3 ) , MoveToTarget(SCREAMING_SNAKE_CASE_ ) )
self.wait()
| 365 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class __A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ (self ):
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = BlipImageProcessor()
UpperCamelCase__ = GPT2Tokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
UpperCamelCase__ = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
UpperCamelCase__ = InstructBlipProcessor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ).tokenizer
def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ).image_processor
def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ).qformer_tokenizer
def UpperCAmelCase_ (self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
UpperCamelCase__ = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ (self ):
UpperCamelCase__ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase__ = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
UpperCamelCase__ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(processor.qformer_tokenizer , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_qformer_tokenizer()
UpperCamelCase__ = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""np""" )
UpperCamelCase__ = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_qformer_tokenizer()
UpperCamelCase__ = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = """lower newer"""
UpperCamelCase__ = processor(text=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = qformer_tokenizer(SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_qformer_tokenizer()
UpperCamelCase__ = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = """lower newer"""
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
processor()
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_qformer_tokenizer()
UpperCamelCase__ = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_qformer_tokenizer()
UpperCamelCase__ = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = """lower newer"""
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
| 178 | 0 |
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
lowercase__ :Optional[Any] = parse(importlib.metadata.version("torch"))
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
lowercase = STR_OPERATION_TO_FUNC[operation]
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = parse(importlib.metadata.version(lowerCAmelCase__ ) )
return operation(lowerCAmelCase__ , parse(lowerCAmelCase__ ) )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
return compare_versions(lowercase__ , lowerCAmelCase__ , lowerCAmelCase__ )  # compare against the installed torch version parsed above
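# ---- Hedged usage sketch (not part of the original file) ----
# What the helper above reduces to once STR_OPERATION_TO_FUNC resolves the
# operator string (assuming it maps ">=" to operator.ge, as in accelerate):
import operator
from packaging.version import parse as _parse
assert operator.ge(_parse("2.1.0"), _parse("1.13")) is True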
| 101 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def UpperCamelCase_( snake_case : Any ):
'''simple docstring'''
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X2_0000 and cp <= 0X2_A6DF) #
or (cp >= 0X2_A700 and cp <= 0X2_B73F) #
or (cp >= 0X2_B740 and cp <= 0X2_B81F) #
or (cp >= 0X2_B820 and cp <= 0X2_CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2_F800 and cp <= 0X2_FA1F) #
): #
return True
return False
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
for char in word:
snake_case_ = ord(snake_case )
if not _is_chinese_char(snake_case ):
return 0
return 1
def UpperCamelCase_( snake_case : List[str] ):
'''simple docstring'''
snake_case_ = set()
for token in tokens:
snake_case_ = len(snake_case ) > 1 and is_chinese(snake_case )
if chinese_word:
word_set.add(snake_case )
snake_case_ = list(snake_case )
return word_list
def UpperCamelCase_( snake_case : List[str] , snake_case : set ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
snake_case_ = max([len(snake_case ) for w in chinese_word_set] )
snake_case_ = bert_tokens
snake_case_ , snake_case_ = 0, len(snake_case )
while start < end:
snake_case_ = True
if is_chinese(bert_word[start] ):
snake_case_ = min(end - start , snake_case )
for i in range(snake_case , 1 , -1 ):
snake_case_ = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
snake_case_ = "##" + bert_word[j]
snake_case_ = start + i
snake_case_ = False
break
if single_word:
start += 1
return bert_word
def UpperCamelCase_( snake_case : List[str] , snake_case : LTP , snake_case : BertTokenizer ):
'''simple docstring'''
snake_case_ = []
for i in range(0 , len(snake_case ) , 1_0_0 ):
snake_case_ = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=["cws"] ).cws
snake_case_ = [get_chinese_word(snake_case ) for r in res]
ltp_res.extend(snake_case )
assert len(snake_case ) == len(snake_case )
snake_case_ = []
for i in range(0 , len(snake_case ) , 1_0_0 ):
snake_case_ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=snake_case , truncation=snake_case , max_length=5_1_2 )
bert_res.extend(res["input_ids"] )
assert len(snake_case ) == len(snake_case )
snake_case_ = []
for input_ids, chinese_word in zip(snake_case , snake_case ):
snake_case_ = []
for id in input_ids:
snake_case_ = bert_tokenizer._convert_id_to_token(snake_case )
input_tokens.append(snake_case )
snake_case_ = add_sub_symbol(snake_case , snake_case )
snake_case_ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(snake_case ):
if token[:2] == "##":
snake_case_ = token[2:]
# save chinese tokens' pos
if len(snake_case ) == 1 and _is_chinese_char(ord(snake_case ) ):
ref_id.append(snake_case )
ref_ids.append(snake_case )
assert len(snake_case ) == len(snake_case )
return ref_ids
def UpperCamelCase_( snake_case : Any ):
'''simple docstring'''
with open(args.file_name , "r" , encoding="utf-8" ) as f:
snake_case_ = f.readlines()
snake_case_ = [line.strip() for line in data if len(snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
snake_case_ = LTP(args.ltp ) # faster in GPU device
snake_case_ = BertTokenizer.from_pretrained(args.bert )
snake_case_ = prepare_ref(snake_case , snake_case , snake_case )
with open(args.save_path , "w" , encoding="utf-8" ) as f:
snake_case_ = [json.dumps(snake_case ) + "\n" for ref in ref_ids]
f.writelines(snake_case )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
main(args)
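# ---- Hedged sketch (not part of the original file) ----
# The CJK code-point check above, restated with a clear name; the ranges are
# copied verbatim from the function at the top of this script.
def is_cjk_char(cp: int) -> bool:
    return (
        0x4E00 <= cp <= 0x9FFF
        or 0x3400 <= cp <= 0x4DBF
        or 0x20000 <= cp <= 0x2A6DF
        or 0x2A700 <= cp <= 0x2B73F
        or 0x2B740 <= cp <= 0x2B81F
        or 0x2B820 <= cp <= 0x2CEAF
        or 0xF900 <= cp <= 0xFAFF
        or 0x2F800 <= cp <= 0x2FA1F
    )
# Example: is_cjk_char(ord("中")) is True; is_cjk_char(ord("a")) is False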
| 85 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : List[str] = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
a : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
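# ---- Hedged sketch (not part of the original file) ----
# The core idea behind _LazyModule, reduced to a minimal illustrative class:
# attribute access triggers the real submodule import on first use.
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")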
| 79 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : int = 50 ) -> int:
    '''Project Euler 116: count rows of the given length tiled with grey unit squares and at least one coloured tile of length 2, 3 or 4 (one tile size per colouring).'''
a : Any = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'''{solution() = }''')
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
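# The metadata maps class ids to readable names and records which classes are
# "thing" (instance) categories, as expected by OneFormerImageProcessor.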
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, size=None, do_resize=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], num_labels=10, do_reduce_labels=False, ignore_index=255, repo_path="shi-labs/oneformer_demo", class_info_file="ade20k_panoptic.json", num_text=10, ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to OneFormerImageProcessor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    image_processing_class = image_processing_class
    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))
def A_ ( self : str ) -> List[Any]:
pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt").pixel_values

        self.assertEqual(
            encoded_images.shape, (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt").pixel_values

        self.assertEqual(
            encoded_images.shape, (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt").pixel_values

        self.assertEqual(
            encoded_images.shape, (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def comm_get_image_processor_inputs(self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), annotations, return_tensors="pt", instance_id_to_semantic_id=instance_id_to_semantic_id, pad_and_return_pixel_mask=True, )

        return inputs
def A_ ( self : str ) -> Any:
pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type)

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1  # first row
        fake_binary_mask[1, :15] = 1  # second row
        fake_binary_mask[5, :10] = 1  # fifth row

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo", )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape, (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ), )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo", )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width))
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo", )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width))
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """
    Returns the maximum sum of k consecutive elements, computed with a sliding window.

    >>> max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4)
    24
    """
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n'
_DESCRIPTION = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[
'https://github.com/m-popovic/chrF',
] , )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")

        # sacrebleu expects one list per reference position, so transpose the references
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed / compiled containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model
def wait_for_everyone():
    PartialState().wait_for_everyone()
def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
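# A minimal usage sketch of the context manager above (variable names are illustrative):
#   with patch_environment(master_port="29501"):
#       ...  # MASTER_PORT is set here and removed again on exit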
def get_pretty_name(obj):
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`, returning `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination
def is_port_in_use(port: int = None) -> bool:
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
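# enable_full_determinism() forces deterministic CUDA/cuDNN kernels so the
# hard-coded expected slices below are reproducible across runs.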
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element
def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """Create a simple DataLoader to use during the test cases."""
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl
def verify_dataloader_batch_sizes(
    accelerator: Accelerator, dataset_size: int, batch_size: int, process_0_expected_batch_sizes: List[int], process_1_expected_batch_sizes: List[int], ):
    """Verify the batch sizes coming from a prepared dataloader in each process."""
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1], )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2], )
def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1], )

    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1], )
def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)
def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
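# This script is intended to be launched across two processes, e.g. (illustrative):
#   accelerate launch --num_processes 2 test_distributed_data_loop.py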
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('''transformers.models.encodec''')
MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
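# The MAPPING_* tables above translate parameter names from the original EnCodec
# checkpoints into the corresponding names in the Hugging Face implementation.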
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}" )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
lowerCamelCase_ : Dict = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    r"""
    Construct a DPRContextEncoder tokenizer, backed by the BERT tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    r"""
    Construct a DPRQuestionEncoder tokenizer, backed by the BERT tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
        - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
          sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(self, questions, titles=None, texts=None, padding=False, truncation=False, max_length=None, return_tensors=None, return_attention_mask=None, **kwargs) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(f"There should be as many titles as texts, but got {len(titles)} titles and {len(texts)} texts.")
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                # attend to real tokens, mask out padding
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int = 16, max_answer_length: int = 64, num_spans_per_passage: int = 4) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans) -> List[DPRSpanPrediction]:
        # Score every candidate span, then keep the top non-overlapping ones.
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
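# Minimal usage sketch for the reader tokenizer defined just below (the
# checkpoint name comes from the pretrained maps above; purely illustrative,
# not executed here):
#
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded_inputs = tokenizer(
#         questions=["What is love?"],
#         titles=["Haddaway"],
#         texts=["'What Is Love' is a song recorded by Haddaway"],
#         return_tensors="pt",
#     )
#     # a DPRReader model's outputs can then be turned into answer spans with
#     # tokenizer.decode_best_spans(encoded_inputs, model_outputs)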
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    r"""
    Construct a DPRReader tokenizer, backed by the BERT tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"] | 81 |
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """
    Count how many tile amounts t <= t_limit can form between 1 and n_limit
    distinct hollow square laminae (each uses outer_width**2 - hole_width**2 tiles).
    """
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # the hole width must have the same parity as the outer width
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F"{solution() = }")
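# Worked example: a 5x5 outer square with a 3x3 hole uses 5*5 - 3*3 = 16 tiles,
# and with a 1x1 hole uses 24 tiles, so once t_limit >= 24 both N=16 and N=24
# gain one lamina arrangement each. The function counts every tile amount whose
# number of distinct arrangements lies between 1 and n_limit.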
| 200 | 0 |
'''simple docstring'''
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
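# Worked example: solution(15) == 26, because 2**15 = 32768 and 3+2+7+6+8 = 26.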
| 217 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")
if __name__ == "__main__":
main()
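# Round-trip sketch (illustrative only; generating 1024-bit primes is slow and
# the message m must be smaller than the modulus n):
#
#     public_key, private_key = generate_key(1024)
#     n, e = public_key
#     _, d = private_key
#     ciphertext = pow(42, e, n)          # encrypt
#     assert pow(ciphertext, d, n) == 42  # decrypt recovers the message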
| 217 | 1 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # An iterable dataset of unknown length: yields increasing integers and
    # stops at random with probability `p_stop` after each item.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # Each shard should report the same length as the expected list of batches
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
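# Minimal sketch of the sharding behaviour exercised above (2 simulated
# processes over one batch sampler; everything comes from the imports at the
# top of this file, kept as a comment so the test module stays side-effect free):
#
#     sampler = BatchSampler(range(8), batch_size=2, drop_last=False)
#     shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
#     # list(shards[0]) == [[0, 1], [4, 5]] and list(shards[1]) == [[2, 3], [6, 7]]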
| 42 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, torch.tensor(threshold), True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # stretch the sigmoid to (-0.1, 1.1) and clamp back to [0, 1]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
    args = parser.parse_args()
main(args)
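# Example invocation (script and checkpoint paths are placeholders, not taken
# from this repository):
#   python bertarize.py --pruning_method topK --threshold 0.10 \
#       --model_name_or_path ./serialization_dir/topk_finepruned_model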
| 15 | 0 |
def binary_insertion_sort(collection: list) -> list:
    """Sort in place: insertion sort with a binary search for the insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift everything from the insertion point one slot to the right
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
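# Quick non-interactive self-check: the binary search finds where 1 belongs,
# then the larger elements shift right one slot each.
#
#     assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]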
| 247 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """Log commit info into `folder_path`/git_log.json."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """Handle single and multi-GPU / multi-node settings."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
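# Minimal CPU-only sketch (attribute names mirror what the helpers above read;
# the argparse setup that normally produces `args` is assumed elsewhere):
#
#     from types import SimpleNamespace
#     args = SimpleNamespace(n_gpu=0, seed=56)
#     init_gpu_params(args)  # short-circuits: sets is_master=True, multi_gpu=False
#     set_seed(args)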
| 247 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    r"""
    Constructs a CLIP image processor: optional RGB conversion, shortest-edge
    resize, center crop, rescale and normalization with the CLIP mean/std.
    """

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, do_convert_rgb=True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        # Resize so the shortest edge matches size["shortest_edge"], keeping the aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, do_convert_rgb=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 293 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    r"""
    Constructs a CLIP image processor: optional RGB conversion, shortest-edge
    resize, center crop, rescale and normalization with the CLIP mean/std.
    """

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, do_convert_rgb=True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        # Resize so the shortest edge matches size["shortest_edge"], keeping the aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, do_convert_rgb=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
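# Minimal usage sketch (constructing the processor directly with its defaults;
# the output shape assumes the default 224x224 center crop):
#
#     from PIL import Image
#     import numpy as np
#     processor = CLIPImageProcessor()
#     image = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
#     batch = processor.preprocess(image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224)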
| 186 | 0 |
'''simple docstring'''
__all__ = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
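# Assuming this file is a package's utils/__init__.py, downstream modules can
# rely on the re-exports above, e.g.:
#     from .utils import Version, disable_progress_bar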
| 360 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
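# For example, a codebook key like "vqvae.bottleneck.level_blocks.0.k" ends in
# "k" and is rewritten to end in "codebook" by the rule above (key name is
# illustrative, not taken from an actual checkpoint).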
def a_ ( __snake_case : Dict , __snake_case : int , __snake_case : Dict , __snake_case : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ ={}
import re
lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_conv_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] )
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_encoder_block_conv_in.sub(__snake_case , __snake_case )
elif re_encoder_block_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] )
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_encoder_block_resnet.sub(__snake_case , __snake_case )
elif re_encoder_block_proj_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_proj_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
lowerCamelCase_ =re_encoder_block_proj_out.sub(__snake_case , __snake_case )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_conv_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_decoder_block_conv_out.sub(__snake_case , __snake_case )
elif re_decoder_block_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_decoder_block_resnet.sub(__snake_case , __snake_case )
elif re_decoder_block_proj_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_proj_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
lowerCamelCase_ =re_decoder_block_proj_in.sub(__snake_case , __snake_case )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_conv_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_prior_cond_conv_out.sub(__snake_case , __snake_case )
elif re_prior_cond_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_prior_cond_resnet.sub(__snake_case , __snake_case )
elif re_prior_cond_proj_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_proj_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
lowerCamelCase_ =re_prior_cond_proj_in.sub(__snake_case , __snake_case )
# keep original key
else:
lowerCamelCase_ =original_key
lowerCamelCase_ =replace_key(__snake_case )
if key is None or F'''{key_prefix}.{key}''' not in model_state_dict:
print(F'''failed converting {original_key} to {key}, does not match''' )
# handle mismatched shape
elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape:
lowerCamelCase_ =model_state_dict[F'''{key_prefix}.{key}''']
print(F'''{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match''' )
lowerCamelCase_ =original_key  # fall back to the original key
lowerCamelCase_ =original_key  # record the rename in the mapping that is dumped to mapping.json below
lowerCamelCase_ =value
return new_dict
@torch.no_grad()
def a_ ( __snake_case : List[str]=None , __snake_case : Tuple=None ) -> Union[str, Any]:
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ):
lowerCamelCase_ =requests.get(F'''{PREFIX}{file}''' , allow_redirects=__snake_case )
os.makedirs(F'''{pytorch_dump_folder_path}/''' , exist_ok=__snake_case )
open(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , '''wb''' ).write(r.content )
lowerCamelCase_ =MODEL_MAPPING[model_name.split('''/''' )[-1]]
lowerCamelCase_ =JukeboxConfig.from_pretrained(__snake_case )
lowerCamelCase_ =JukeboxModel(__snake_case )
lowerCamelCase_ =[]
lowerCamelCase_ ={}
for i, dict_name in enumerate(__snake_case ):
lowerCamelCase_ =torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )['''model''']
lowerCamelCase_ ={}
for k in old_dic.keys():
if k.endswith('''.b''' ):
lowerCamelCase_ =old_dic[k]
elif k.endswith('''.w''' ):
lowerCamelCase_ =old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
lowerCamelCase_ =old_dic[k]
else:
lowerCamelCase_ =old_dic[k]
lowerCamelCase_ ='''vqvae''' if i == 0 else F'''priors.{3 - i}'''
lowerCamelCase_ =fix_jukebox_keys(__snake_case , model.state_dict() , __snake_case , __snake_case )
weight_dict.append(__snake_case )
lowerCamelCase_ =weight_dict.pop(0 )
model.vqvae.load_state_dict(__snake_case )
for i in range(len(__snake_case ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
with open(F'''{pytorch_dump_folder_path}/mapping.json''' , '''w''' ) as txtfile:
json.dump(__snake_case , __snake_case )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__snake_case )
return weight_dict
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
a_ : Optional[int] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
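# Added standalone check (my own toy key, not from a real checkpoint) of the
# encoder conv-in renaming rule implemented above:
import re as _re

_pat = _re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
_original_key = "encoders.0.level_blocks.1.model.2.1.weight"
_groups = _pat.match(_original_key).groups()           # ('0', '1', '2', '1', 'weight')
_block_index = int(_groups[2]) * 2 + int(_groups[3])   # 2 * 2 + 1 = 5
_new_key = f"encoders.{_groups[0]}.level_blocks.{_groups[1]}.downsample_block.{_block_index}.{_groups[-1]}"
assert _pat.sub(_new_key, _original_key) == "encoders.0.level_blocks.1.downsample_block.5.weight"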
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class UpperCamelCase__ :
'''simple docstring'''
lowerCamelCase_ : Optional[int] = None
lowerCamelCase_ : Optional[jnp.ndarray] = None
lowerCamelCase_ : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def _lowercase ( cls ) -> Dict:
return cls()
@dataclass
class UpperCamelCase__ (A__ ):
'''simple docstring'''
lowerCamelCase_ : jnp.ndarray
lowerCamelCase_ : jnp.ndarray
lowerCamelCase_ : KarrasVeSchedulerState
class UpperCamelCase__ (A__ , A__ ):
'''simple docstring'''
@property
def _lowercase ( self ) -> Optional[Any]:
return True
@register_to_config
def __init__( self , UpperCamelCase__ = 0.02 , UpperCamelCase__ = 100 , UpperCamelCase__ = 1.007 , UpperCamelCase__ = 80 , UpperCamelCase__ = 0.05 , UpperCamelCase__ = 50 , ) -> str:
pass  # the decorated arguments are stored on self.config by @register_to_config
def _lowercase ( self ) -> Union[str, Any]:
return KarrasVeSchedulerState.create()
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = () ) -> KarrasVeSchedulerState:
lowerCamelCase : Any = jnp.arange(0 , __lowercase )[::-1].copy()
lowerCamelCase : Dict = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__lowercase , schedule=jnp.array(__lowercase , dtype=jnp.float32 ) , timesteps=__lowercase , )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> Tuple[jnp.ndarray, float]:
if self.config.s_min <= sigma <= self.config.s_max:
lowerCamelCase : Optional[Any] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
lowerCamelCase : Any = 0
# sample eps ~ N(0, S_noise^2 * I)
lowerCamelCase : Any = random.split(__lowercase , num=1 )
lowerCamelCase : int = self.config.s_noise * random.normal(key=__lowercase , shape=sample.shape )
lowerCamelCase : Optional[int] = sigma + gamma * sigma
lowerCamelCase : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
lowerCamelCase : Union[str, Any] = sample_hat + sigma_hat * model_output
lowerCamelCase : str = (sample_hat - pred_original_sample) / sigma_hat
lowerCamelCase : str = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__lowercase , derivative=__lowercase , state=__lowercase )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
lowerCamelCase : Optional[int] = sample_prev + sigma_prev * model_output
lowerCamelCase : str = (sample_prev - pred_original_sample) / sigma_prev
lowerCamelCase : Any = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__lowercase , derivative=__lowercase , state=__lowercase )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
raise NotImplementedError()
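# Added intuition sketch (NumPy, names are mine; not the library API): the
# scheduler above implements the Karras et al. stochastic sampler -- churn the
# sample up to a higher noise level sigma_hat, then take an Euler step down.
import numpy as np

_rng = np.random.default_rng(0)


def karras_step_sketch(sample, sigma, sigma_prev, model_fn, gamma=0.1, s_noise=1.007):
    # 1) perturb the sample up to a slightly higher noise level sigma_hat
    eps = s_noise * _rng.standard_normal(sample.shape)
    sigma_hat = sigma + gamma * sigma
    sample_hat = sample + (sigma_hat**2 - sigma**2) ** 0.5 * eps
    # 2) Euler step from sigma_hat down to sigma_prev
    pred_original = sample_hat + sigma_hat * model_fn(sample_hat, sigma_hat)
    derivative = (sample_hat - pred_original) / sigma_hat
    return sample_hat + (sigma_prev - sigma_hat) * derivative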
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given as (magnitude, angle) into its (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check whether the net moment of a 2D force system is (numerically) zero."""
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
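    # Added example (mine, not from the original problem set): a symmetric
    # seesaw balances because the moments about the pivot cancel exactly.
    seesaw_forces = array([[0, -10], [0, -10], [0, 20]])  # two weights, pivot reaction
    seesaw_location = array([[-2, 0], [2, 0], [0, 0]])    # x = -2 m, +2 m, and the pivot
    assert in_static_equilibrium(seesaw_forces, seesaw_location)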
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Normality = molarity * n-factor, with molarity = moles / volume (litres)."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure (atm): P = nRT / V, R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume (L): V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature (K): T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
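    # Added usage check (my own values): 2 mol of an ideal gas at 300 K in a
    # 10 L vessel gives P = nRT / V = (2 * 0.0821 * 300) / 10 = 4.926 atm -> 5.
    assert moles_to_pressure(volume=10, moles=2, temperature=300) == 5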
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : str , _UpperCAmelCase : str = "▁" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[str, AddedToken] = "<unk>" , _UpperCAmelCase : Union[str, AddedToken] = "</s>" , _UpperCAmelCase : Union[str, AddedToken] = "<pad>" , ):
"""simple docstring"""
UpperCAmelCase__ = {
"""pad""": {"""id""": 0, """token""": pad_token},
"""eos""": {"""id""": 1, """token""": eos_token},
"""unk""": {"""id""": 2, """token""": unk_token},
}
UpperCAmelCase__ = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
UpperCAmelCase__ = token_dict["""token"""]
UpperCAmelCase__ = Tokenizer(Unigram() )
UpperCAmelCase__ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(""" {2,}""" ) , """ """ ),
normalizers.Lowercase(),
] )
UpperCAmelCase__ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ),
pre_tokenizers.Digits(individual_digits=_UpperCAmelCase ),
pre_tokenizers.Punctuation(),
] )
UpperCAmelCase__ = decoders.Metaspace(replacement=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
UpperCAmelCase__ = TemplateProcessing(
single=f'''$A {self.special_tokens['eos']['token']}''' , special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] , )
UpperCAmelCase__ = {
"""model""": """SentencePieceUnigram""",
"""replacement""": replacement,
"""add_prefix_space""": add_prefix_space,
}
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ):
"""simple docstring"""
UpperCAmelCase__ = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase__ = [files]
self._tokenizer.train(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : Union[Iterator[str], Iterator[Iterator[str]]] , _UpperCAmelCase : int = 80_00 , _UpperCAmelCase : bool = True , ):
"""simple docstring"""
UpperCAmelCase__ = trainers.UnigramTrainer(
vocab_size=_UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=_UpperCAmelCase , )
self._tokenizer.train_from_iterator(_UpperCAmelCase , trainer=_UpperCAmelCase )
self.add_unk_id()
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = json.loads(self._tokenizer.to_str() )
UpperCAmelCase__ = self.special_tokens["""unk"""]["""id"""]
UpperCAmelCase__ = Tokenizer.from_str(json.dumps(_UpperCAmelCase ) )
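# Hedged usage sketch: assuming the class above keeps its upstream name
# SentencePieceUnigramTokenizer (the dump obfuscates it), a toy training run
# would look like this; the corpus and vocab size are illustrative only.
if __name__ == "__main__":
    tokenizer = SentencePieceUnigramTokenizer()
    tokenizer.train_from_iterator(
        ["the quick brown fox", "jumps over the lazy dog"],
        vocab_size=30,
        show_progress=False,
    )
    print(tokenizer.encode("the fox").tokens)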
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _A ( _lowercase ) -> Dict:
"""simple docstring"""
if is_torch_version('<' , '2.0.0' ) or not hasattr(_lowercase , '_dynamo' ):
return False
return isinstance(_lowercase , torch._dynamo.eval_frame.OptimizedModule )
def _A ( _lowercase , _lowercase = True ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__UpperCamelCase = is_compiled_module(_lowercase )
if is_compiled:
__UpperCamelCase = model
__UpperCamelCase = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_lowercase , _lowercase ):
__UpperCamelCase = model.module
if not keep_fpaa_wrapper:
__UpperCamelCase = getattr(_lowercase , 'forward' )
__UpperCamelCase = model.__dict__.pop('_original_forward' , _lowercase )
if original_forward is not None:
while hasattr(_lowercase , '__wrapped__' ):
__UpperCamelCase = forward.__wrapped__
if forward == original_forward:
break
__UpperCamelCase = forward
if getattr(_lowercase , '_converted_to_transformer_engine' , _lowercase ):
convert_model(_lowercase , to_transformer_engine=_lowercase )
if is_compiled:
__UpperCamelCase = model
__UpperCamelCase = compiled_model
return model
def _A ( ) -> Any:
"""simple docstring"""
PartialState().wait_for_everyone()
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_lowercase , _lowercase )
elif PartialState().local_process_index == 0:
torch.save(_lowercase , _lowercase )
@contextmanager
def _A ( **kwargs ) -> Union[str, Any]:
"""simple docstring"""
for key, value in kwargs.items():
os.environ[key.upper()] = str(value)
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _A ( _lowercase ) -> Tuple:
"""simple docstring"""
if not hasattr(_lowercase , '__qualname__' ) and not hasattr(_lowercase , '__name__' ):
__UpperCamelCase = getattr(_lowercase , '__class__' , _lowercase )
if hasattr(_lowercase , '__qualname__' ):
return obj.__qualname__
if hasattr(_lowercase , '__name__' ):
return obj.__name__
return str(_lowercase )
def _A ( _lowercase , _lowercase ) -> Any:
"""simple docstring"""
for key, value in source.items():
if isinstance(_lowercase , _lowercase ):
__UpperCamelCase = destination.setdefault(_lowercase , {} )
merge_dicts(_lowercase , _lowercase )
else:
__UpperCamelCase = value
return destination
def _A ( _lowercase = None ) -> bool:
"""simple docstring"""
if port is None:
__UpperCamelCase = 2_95_00
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
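# Added illustration of the recursive dict merge defined above (a standalone
# re-implementation with readable names, since the dump obfuscates them; the
# toy data is my own):
def _merge_dicts_demo(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})  # descend, creating the branch if needed
            _merge_dicts_demo(value, node)
        else:
            destination[key] = value
    return destination


assert _merge_dicts_demo({"a": {"x": 1}}, {"a": {"y": 2}, "b": 3}) == {"a": {"y": 2, "x": 1}, "b": 3}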
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class _a ( __snake_case ):
_lowercase : Dict = "canine"
def __init__( self: Union[str, Any] , UpperCamelCase_: Dict=768 , UpperCamelCase_: List[str]=12 , UpperCamelCase_: Optional[Any]=12 , UpperCamelCase_: Optional[int]=3_072 , UpperCamelCase_: str="gelu" , UpperCamelCase_: int=0.1 , UpperCamelCase_: Optional[int]=0.1 , UpperCamelCase_: Union[str, Any]=16_384 , UpperCamelCase_: str=16 , UpperCamelCase_: str=0.02 , UpperCamelCase_: Optional[int]=1E-1_2 , UpperCamelCase_: Optional[Any]=0 , UpperCamelCase_: str=0Xe_0_0_0 , UpperCamelCase_: Tuple=0Xe_0_0_1 , UpperCamelCase_: int=4 , UpperCamelCase_: Dict=4 , UpperCamelCase_: List[Any]=8 , UpperCamelCase_: Union[str, Any]=16_384 , UpperCamelCase_: Tuple=128 , **UpperCamelCase_: str , ) -> List[str]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
lowercase__ = max_position_embeddings
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = type_vocab_size
lowercase__ = layer_norm_eps
# Character config:
lowercase__ = downsampling_rate
lowercase__ = upsampling_kernel_size
lowercase__ = num_hash_functions
lowercase__ = num_hash_buckets
lowercase__ = local_transformer_stride
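# Added usage sketch (standard transformers API; CANINE works directly on
# Unicode code points, so no vocabulary file is needed):
if __name__ == "__main__":
    from transformers import CanineConfig, CanineModel

    model = CanineModel(CanineConfig())  # randomly initialised, defaults as in __init__ above
    # CanineModel.from_pretrained("google/canine-s") loads pretrained weights instead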
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
_lowercase : int = AltDiffusionPipeline
_lowercase : Tuple = TEXT_TO_IMAGE_PARAMS
_lowercase : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
_lowercase : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_lowercase : int = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowercase__ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_002 , )
lowercase__ = CLIPTextModel(UpperCamelCase_ )
lowercase__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowercase__ = 77
lowercase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[int] , UpperCamelCase_: str=0 ) -> Union[str, Any]:
"""simple docstring"""
if str(UpperCamelCase_ ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(UpperCamelCase_ )
else:
lowercase__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
lowercase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase_ ( self: Optional[Any] ) -> Dict:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def lowerCamelCase_ ( self: Dict ) -> str:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCamelCase_ ( self: Optional[int] ) -> Any:
"""simple docstring"""
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
torch.manual_seed(0 )
lowercase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
lowercase__ = RobertaSeriesModelWithTransformation(UpperCamelCase_ )
lowercase__ = text_encoder
lowercase__ = AltDiffusionPipeline(**UpperCamelCase_ )
lowercase__ = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = self.get_dummy_inputs(UpperCamelCase_ )
lowercase__ = '''A photo of an astronaut'''
lowercase__ = alt_pipe(**UpperCamelCase_ )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self: Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
lowercase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
lowercase__ = RobertaSeriesModelWithTransformation(UpperCamelCase_ )
lowercase__ = text_encoder
lowercase__ = AltDiffusionPipeline(**UpperCamelCase_ )
lowercase__ = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = self.get_dummy_inputs(UpperCamelCase_ )
lowercase__ = alt_pipe(**UpperCamelCase_ )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: Dict ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=UpperCamelCase_ )
lowercase__ = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = '''A painting of a squirrel eating a burger'''
lowercase__ = torch.manual_seed(0 )
lowercase__ = alt_pipe([prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self: Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase__ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
lowercase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=UpperCamelCase_ , safety_checker=UpperCamelCase_ )
lowercase__ = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = '''A painting of a squirrel eating a burger'''
lowercase__ = torch.manual_seed(0 )
lowercase__ = alt_pipe([prompt] , generator=UpperCamelCase_ , num_inference_steps=2 , output_type='''numpy''' )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
import unittest

from knapsack import greedy_knapsack as kp


class TestClass(unittest.TestCase):
    def test_sorted(self):
        """calc_profit takes (profit, weight, max_weight) and returns the best profit."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        """ValueError is expected for a negative max_weight."""
        # note: without a callable argument, assertRaisesRegex only builds the
        # assertion context; this mirrors the upstream test as written
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        """ValueError is expected for a negative weight."""
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        """ValueError is expected for a negative profit."""
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        """ValueError is expected for a zero max_weight."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        """ValueError is expected when profit and weight differ in length."""
        self.assertRaisesRegex(
            ValueError, "The length of profit and weight must be same.")


if __name__ == "__main__":
    unittest.main()
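# Added note: the expected 210 in test_sorted falls out directly -- the items
# weigh 2+4+6+8+10+12 = 42 <= 100, so everything fits and the profit is
# 10+20+30+40+50+60 = 210. A minimal fractional-greedy sketch (my own, not the
# module under test):
def _greedy_fractional(profit, weight, max_weight):
    # take items in decreasing profit/weight ratio, splitting the last one if needed
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        if w <= capacity:
            total, capacity = total + p, capacity - w
        else:
            total += p * capacity / w  # take the divisible remainder of the last item
            break
    return total


assert _greedy_fractional([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210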
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig''']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_rembert'''] = ['''RemBertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_rembert_fast'''] = ['''RemBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_rembert'''] = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_rembert'''] = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
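# Added context: _LazyModule defers the heavy submodule imports until an
# attribute is first accessed. A toy standalone equivalent, illustrative only
# and not the real transformers implementation:
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [attributes]} into {attribute: submodule}
        self._attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}

    def __getattr__(self, attr):
        # import the owning submodule on first access, then fetch the attribute
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)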
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""")
            backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("""model_type""")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize this instance to a dict, including the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
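# Added usage sketch (my own values): the to_dict override keeps the nested
# backbone config serialisable.
if __name__ == "__main__":
    config = UperNetConfig()  # defaults to a ResNet backbone
    print(config.to_dict()["backbone_config"]["model_type"])  # resnet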
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowercase : List[str] =logging.get_logger(__name__)
_lowercase : List[str] ={
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
_lowercase : Optional[Any] ={
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
_lowercase : int ={"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Map every byte to a printable unicode character so BPE never sees control bytes."""
    bs = (
        list(range(ord("""!"""), ord("""~""") + 1)) + list(range(ord("""¡"""), ord("""¬""") + 1)) + list(range(ord("""®"""), ord("""ÿ""") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class snake_case__ (A__ ):
"""simple docstring"""
__lowerCAmelCase :str = VOCAB_FILES_NAMES
__lowerCAmelCase :int = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase :Optional[Any] = ["input_ids", "attention_mask"]
def __init__( self , __lowercase , __lowercase , __lowercase="replace" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase=False , **__lowercase , ) -> List[Any]:
"""simple docstring"""
a__ : Any = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else bos_token
a__ : Optional[Any] = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else eos_token
a__ : Tuple = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else sep_token
a__ : Any = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else cls_token
a__ : List[Any] = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else unk_token
a__ : str = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
a__ : List[str] = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
super().__init__(
errors=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , add_prefix_space=__lowercase , **__lowercase , )
with open(__lowercase , encoding="""utf-8""" ) as vocab_handle:
a__ : str = json.load(__lowercase )
a__ : Dict = {v: k for k, v in self.encoder.items()}
a__ : Any = errors # how to handle errors in decoding
a__ : Union[str, Any] = bytes_to_unicode()
a__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(__lowercase , encoding="""utf-8""" ) as merges_handle:
a__ : List[Any] = merges_handle.read().split("""\n""" )[1:-1]
a__ : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
a__ : str = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
a__ : Optional[Any] = {}
a__ : Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
a__ : Union[str, Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
return len(self.encoder )
def SCREAMING_SNAKE_CASE__( self ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Tuple:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
a__ : List[Any] = tuple(__lowercase )
a__ : Optional[int] = get_pairs(__lowercase )
if not pairs:
return token
while True:
a__ : List[Any] = min(__lowercase , key=lambda __lowercase : self.bpe_ranks.get(__lowercase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
a__ , a__ : Dict = bigram
a__ : List[Any] = []
a__ : int = 0
while i < len(__lowercase ):
try:
a__ : str = word.index(__lowercase , __lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
a__ : Optional[int] = j
if word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
a__ : List[Any] = tuple(__lowercase )
a__ : Any = new_word
if len(__lowercase ) == 1:
break
else:
a__ : List[Any] = get_pairs(__lowercase )
a__ : Optional[Any] = """ """.join(__lowercase )
a__ : Tuple = word
return word
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Union[str, Any]:
"""simple docstring"""
a__ : int = []
for token in re.findall(self.pat , __lowercase ):
a__ : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowercase ).split(""" """ ) )
return bpe_tokens
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> List[Any]:
"""simple docstring"""
return self.encoder.get(__lowercase , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Tuple:
"""simple docstring"""
return self.decoder.get(__lowercase )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> int:
"""simple docstring"""
a__ : Union[str, Any] = """""".join(__lowercase )
a__ : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__lowercase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a__ : int = os.path.join(
__lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
a__ : Optional[Any] = os.path.join(
__lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(__lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowercase , ensure_ascii=__lowercase ) + """\n""" )
a__ : str = 0
with open(__lowercase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowercase : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
a__ : Tuple = token_index
writer.write(""" """.join(__lowercase ) + """\n""" )
index += 1
return vocab_file, merge_file
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase = None , __lowercase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase )
if token_ids_a is None:
return [1] + ([0] * len(__lowercase )) + [1]
return [1] + ([0] * len(__lowercase )) + [1, 1] + ([0] * len(__lowercase )) + [1]
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase = None ) -> List[int]:
"""simple docstring"""
a__ : Any = [self.sep_token_id]
a__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase=False , **__lowercase ) -> int:
"""simple docstring"""
a__ : Tuple = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowercase ) > 0 and not text[0].isspace()):
a__ : Union[str, Any] = """ """ + text
return (text, kwargs)
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase = None ) -> List[str]:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> List[int]:
"""simple docstring"""
a__ : List[Any] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should contain them already.
inputs.append(__lowercase )
a__ : Optional[int] = """ """.join(__lowercase )
a__ : Any = self.encode(__lowercase )
if len(__lowercase ) > self.model_max_length:
a__ : List[str] = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
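# Added illustration (toy word, my own): get_pairs produces the adjacent symbol
# pairs that bpe() then merges greedily, lowest merge rank first.
assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}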
"""simple docstring"""
from __future__ import annotations
def A_ ( _lowerCAmelCase : float, _lowerCAmelCase : float, _lowerCAmelCase : float, ):
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod() | 320 |
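    # Added check of the mass-action law n * p = n_i**2 (values are my own):
    # solving for the intrinsic concentration gives sqrt(25 * 100) = 50.
    assert carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0) == ("intrinsic_conc", 50.0)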
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__snake_case = logging.get_logger('''transformers.models.speecht5''')
__snake_case = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
__snake_case = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
__snake_case = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
__snake_case = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
__snake_case = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
__snake_case = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
__snake_case = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
__snake_case = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
__snake_case = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__snake_case = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__snake_case = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__snake_case = []
__snake_case = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
__snake_case = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
__snake_case = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
__snake_case = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Tuple, _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
for attribute in key.split('''.''' ):
_a = getattr(_lowerCAmelCase, _lowerCAmelCase )
if weight_type is not None:
_a = getattr(_lowerCAmelCase, _lowerCAmelCase ).shape
else:
_a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
elif weight_type == "running_mean":
_a = value
elif weight_type == "running_var":
_a = value
elif weight_type == "num_batches_tracked":
_a = value
else:
_a = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Tuple ):
"""simple docstring"""
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_a , _a = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def A_ ( _lowerCAmelCase : Any, _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : int ):
"""simple docstring"""
_a = []
if task == "s2t":
_a = hf_model.speechta.encoder.prenet.feature_encoder
_a = MAPPING_S2T
_a = IGNORE_KEYS_S2T
elif task == "t2s":
_a = None
_a = MAPPING_T2S
_a = IGNORE_KEYS_T2S
elif task == "s2s":
_a = hf_model.speechta.encoder.prenet.feature_encoder
_a = MAPPING_S2S
_a = IGNORE_KEYS_S2S
else:
raise ValueError(f'Unsupported task: {task}' )
for name, value in fairseq_dict.items():
if should_ignore(_lowerCAmelCase, _lowerCAmelCase ):
logger.info(f'{name} was ignored' )
continue
_a = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, hf_model.config.feat_extract_norm == '''group''', )
_a = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
_a , _a = key.split('''.*.''' )
if prefix in name and suffix in name:
_a = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
_a = True
if "*" in mapped_key:
_a = name.split(_lowerCAmelCase )[0].split('''.''' )[-2]
_a = mapped_key.replace('''*''', _lowerCAmelCase )
if "weight_g" in name:
_a = '''weight_g'''
elif "weight_v" in name:
_a = '''weight_v'''
elif "bias" in name:
_a = '''bias'''
elif "weight" in name:
_a = '''weight'''
elif "running_mean" in name:
_a = '''running_mean'''
elif "running_var" in name:
_a = '''running_var'''
elif "num_batches_tracked" in name:
_a = '''num_batches_tracked'''
else:
_a = None
set_recursively(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer( full_name, value, feature_extractor, unused_weights, use_group_norm ):
    """Load one convolutional feature-encoder tensor into the HF feature extractor, checking shapes first."""
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_speechta_checkpoint( task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None, ):
    """Convert a fairseq SpeechT5 checkpoint into the Transformers format."""
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path )
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config )
    elif task == "t2s":
        config.max_speech_positions = 18_76
        config.max_text_positions = 6_00
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config )
    elif task == "s2s":
        config.max_speech_positions = 18_76
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config )
    else:
        raise ValueError(f'Unknown task name: {task}' )
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken('''<mask>''', lstrip=True, rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'''mask_token''': mask_token} )
        tokenizer.add_tokens(['''<ctc_blank>'''] )
        feature_extractor = SpeechTaFeatureExtractor()
        processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor )
        processor.save_pretrained(pytorch_dump_folder_path )
    fairseq_checkpoint = torch.load(checkpoint_path )
    recursively_load_weights(fairseq_checkpoint['''model'''], model, task )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('''Pushing to the hub...''' )
        processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
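# Example invocation (all paths below are hypothetical):
#   python convert_speecht5_checkpoint.py --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_hf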
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
) | 320 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_longformer_fast'''] = ['''LongformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_longformer'''] = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_longformer'''] = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
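# Net effect of the lazy module: importing this package stays cheap, and e.g. LongformerModel
# only triggers the heavy torch-backed submodule import on first attribute access.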
| 118 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    """Arguments pointing at the encoder and decoder checkpoints to combine into one model."""
    output_dir: str = field(
        metadata={'''help''': '''The output directory where the model will be written.'''} , )
    encoder_model_name_or_path: str = field(
        metadata={
            '''help''': (
                '''The encoder model checkpoint for weights initialization.'''
                '''Don\'t set if you want to train an encoder model from scratch.'''
            )
        } , )
    decoder_model_name_or_path: str = field(
        metadata={
            '''help''': (
                '''The decoder model checkpoint for weights initialization.'''
                '''Don\'t set if you want to train a decoder model from scratch.'''
            )
        } , )
    encoder_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained encoder config name or path if not the same as encoder_model_name'''} )
    decoder_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained decoder config name or path if not the same as decoder_model_name'''} )
def main():
    parser = HfArgumentParser((ModelArguments,) )
    (model_args ,) = parser.parse_args_into_dataclasses()
    # Load pretrained model and tokenizer
    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name )
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name )
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True
    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=encoder_config , decoder_config=decoder_config , )
    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id
    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id
    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
    model.save_pretrained(model_args.output_dir )
    image_processor.save_pretrained(model_args.output_dir )
    tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
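# Example invocation (script name and checkpoints are illustrative):
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2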
| 118 | 1 |
def naive_pattern_search(s, pattern):
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
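# The scan above is O(len(s) * len(pattern)) in the worst case (e.g. s = "aaa...a", pattern = "aaab"),
# and matches may overlap: naive_pattern_search("aaaa", "aa") == [0, 1, 2].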
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC"""))
| 128 |
# Imports
import numpy as np
class IndexCalculation:
    """Compute common vegetation indices from per-band pixel matrices (red, green, blue, red edge, nir)."""
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arvaa,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!" )
            return False
    def arvaa(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )
    def cvi(self):
        return self.nir * (self.red / (self.green**2))
    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )
    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)
    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )
    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )
    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )
    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )
    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green(self):
        return (self.nir / self.green) - 1
    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1
    def ci(self):
        return (self.red - self.blue) / self.red
    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
    def gdvi(self):
        return self.nir - self.green
    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )
    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)
    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i(self):
        return (self.red + self.green + self.blue) / 30.5
    def rvi(self):
        return self.nir / self.red
    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2
    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)
    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)
    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)
    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)
    def ri(self):
        return (self.red - self.green) / (self.red + self.green)
    def s(self):
        max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
        return (max_value - min_value) / max_value
    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi(self):
        return self.nir / self.red
    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
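# Minimal usage sketch (the 2x2 band arrays are made-up values):
#   bands = IndexCalculation(
#       red=np.array([[50.0, 60.0], [70.0, 80.0]]),
#       green=np.array([[40.0, 42.0], [44.0, 46.0]]),
#       blue=np.array([[30.0, 31.0], [32.0, 33.0]]),
#       nir=np.array([[120.0, 130.0], [140.0, 150.0]]),
#   )
#   ndvi_map = bands.calculation("NDVI")  # elementwise (NIR - R) / (NIR + R)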
| 128 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__A =logging.get_logger(__name__)
class _snake_case ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
def __init__( self , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PIL.Image.BICUBIC , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 / 255 , _lowerCamelCase = True , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ):
super().__init__(**_lowerCamelCase)
UpperCAmelCase__ : Tuple = size if size is not None else {"""height""": 256, """width""": 256}
UpperCAmelCase__ : Optional[int] = get_size_dict(_lowerCamelCase)
UpperCAmelCase__ : Dict = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
UpperCAmelCase__ : List[Any] = get_size_dict(_lowerCamelCase , param_name="""crop_size""")
UpperCAmelCase__ : Optional[int] = do_resize
UpperCAmelCase__ : Dict = size
UpperCAmelCase__ : Any = resample
UpperCAmelCase__ : Union[str, Any] = do_center_crop
UpperCAmelCase__ : List[Any] = crop_size
UpperCAmelCase__ : Any = do_rescale
UpperCAmelCase__ : Optional[Any] = rescale_factor
UpperCAmelCase__ : Union[str, Any] = do_normalize
UpperCAmelCase__ : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PIL.Image.BICUBIC , _lowerCamelCase = None , **_lowerCamelCase , ):
UpperCAmelCase__ : Dict = get_size_dict(_lowerCamelCase)
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''')
return resize(
_lowerCamelCase , size=(size["""height"""], size["""width"""]) , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
UpperCAmelCase__ : List[Any] = get_size_dict(_lowerCamelCase)
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''')
return center_crop(_lowerCamelCase , size=(size["""height"""], size["""width"""]) , data_format=_lowerCamelCase , **_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
return rescale(_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
return normalize(_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase=None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ):
UpperCAmelCase__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : Optional[int] = resample if resample is not None else self.resample
UpperCAmelCase__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : str = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Any = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : str = size if size is not None else self.size
UpperCAmelCase__ : Optional[int] = get_size_dict(_lowerCamelCase)
UpperCAmelCase__ : str = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : List[str] = get_size_dict(_lowerCamelCase , param_name="""crop_size""")
UpperCAmelCase__ : str = make_list_of_images(_lowerCamelCase)
if not valid_images(_lowerCamelCase):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""")
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""")
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""")
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
# All transformations expect numpy arrays.
UpperCAmelCase__ : Optional[int] = [to_numpy_array(_lowerCamelCase) for image in images]
if do_resize:
UpperCAmelCase__ : List[str] = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase) for image in images]
if do_center_crop:
UpperCAmelCase__ : Union[str, Any] = [self.center_crop(image=_lowerCamelCase , size=_lowerCamelCase) for image in images]
if do_rescale:
UpperCAmelCase__ : List[str] = [self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase) for image in images]
if do_normalize:
UpperCAmelCase__ : List[Any] = [self.normalize(image=_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase) for image in images]
UpperCAmelCase__ : str = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase) for image in images]
UpperCAmelCase__ : Any = {"""pixel_values""": images}
return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase) | 362 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_KWARGS_DESCRIPTION = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=["About 95 species are currently accepted ."]\n    >>> predictions=["About 95 you now get in ."]\n    >>> references=[["About 95 species are currently known ."]]\n    >>> wiki_split = datasets.load_metric("wiki_split")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        regex = re.compile(R"""\b(a|an|the)\b""" , re.UNICODE )
        return re.sub(regex , """ """ , text )
    def white_space_fix(text):
        return " ".join(text.split() )
    def remove_punc(text):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def compute_exact(a_gold , a_pred):
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_em(predictions , references):
    scores = [any(compute_exact(ref , pred ) for ref in refs ) for pred, refs in zip(predictions , references )]
    return (sum(scores ) / len(scores )) * 100
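# For instance, normalize_answer("The Cat!") and normalize_answer("cat") both reduce to "cat",
# so compute_exact("The Cat!", "cat") == 1 and a single-prediction corpus scores 100.0.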
def SARIngram(sgrams , cgrams , rgramslist , numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall )
    sgramcounter = Counter(sgrams )
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams )
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref
    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter
    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep ) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep )
    if len(keepgramcounterall_rep ) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values() )
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep ) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep )
    # ADDITION
    addgramcounter = set(cgramcounter ) - set(sgramcounter )
    addgramcountergood = set(addgramcounter ) & set(rgramcounter )
    addgramcounterall = set(rgramcounter ) - set(sgramcounter )
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter ) > 0:
        addscore_precision = addtmpscore / len(addgramcounter )
    if len(addgramcounterall ) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall )
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent , csent , rsents):
    numref = len(rsents )
    s1grams = ssent.split(""" """ )
    c1grams = csent.split(""" """ )
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(""" """ )
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams )
        for i in range(0 , len(r1grams ) - 1 ):
            if i < len(r1grams ) - 1:
                r2gram = r1grams[i] + """ """ + r1grams[i + 1]
                r2grams.append(r2gram )
            if i < len(r1grams ) - 2:
                r3gram = r1grams[i] + """ """ + r1grams[i + 1] + """ """ + r1grams[i + 2]
                r3grams.append(r3gram )
            if i < len(r1grams ) - 3:
                r4gram = r1grams[i] + """ """ + r1grams[i + 1] + """ """ + r1grams[i + 2] + """ """ + r1grams[i + 3]
                r4grams.append(r4gram )
        r2gramslist.append(r2grams )
        r3gramslist.append(r3grams )
        r4gramslist.append(r4grams )
    for i in range(0 , len(s1grams ) - 1 ):
        if i < len(s1grams ) - 1:
            s2gram = s1grams[i] + """ """ + s1grams[i + 1]
            s2grams.append(s2gram )
        if i < len(s1grams ) - 2:
            s3gram = s1grams[i] + """ """ + s1grams[i + 1] + """ """ + s1grams[i + 2]
            s3grams.append(s3gram )
        if i < len(s1grams ) - 3:
            s4gram = s1grams[i] + """ """ + s1grams[i + 1] + """ """ + s1grams[i + 2] + """ """ + s1grams[i + 3]
            s4grams.append(s4gram )
    for i in range(0 , len(c1grams ) - 1 ):
        if i < len(c1grams ) - 1:
            c2gram = c1grams[i] + """ """ + c1grams[i + 1]
            c2grams.append(c2gram )
        if i < len(c1grams ) - 2:
            c3gram = c1grams[i] + """ """ + c1grams[i + 1] + """ """ + c1grams[i + 2]
            c3grams.append(c3gram )
        if i < len(c1grams ) - 3:
            c4gram = c1grams[i] + """ """ + c1grams[i + 1] + """ """ + c1grams[i + 2] + """ """ + c1grams[i + 3]
            c4grams.append(c4gram )
    (keep1score , del1score , add1score) = SARIngram(s1grams , c1grams , r1gramslist , numref )
    (keep2score , del2score , add2score) = SARIngram(s2grams , c2grams , r2gramslist , numref )
    (keep3score , del3score , add3score) = SARIngram(s3grams , c3grams , r3gramslist , numref )
    (keep4score , del4score , add4score) = SARIngram(s4grams , c4grams , r4gramslist , numref )
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score] ) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score] ) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score] ) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence , lowercase = True , tokenizer = "13a" , return_str = True):
    # Normalization is requried for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__ ).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer )()(sentence )
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence )
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence , return_str=True , escape=False )
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence , return_str=True )
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari(sources , predictions , references):
    if not (len(sources ) == len(predictions ) == len(references )):
        raise ValueError("""Sources length must match predictions and references lengths.""" )
    sari_score = 0
    for src, pred, refs in zip(sources , predictions , references ):
        sari_score += SARIsent(normalize(src ) , normalize(pred ) , [normalize(sent ) for sent in refs] )
    sari_score = sari_score / len(predictions )
    return 100 * sari_score
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="exp" , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False , ):
UpperCAmelCase__ : int = len(references[0] )
if any(len(UpperCamelCase__ ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
UpperCAmelCase__ : int = [[refs[i] for refs in references] for i in range(UpperCamelCase__ )]
UpperCAmelCase__ : int = sacrebleu.corpus_bleu(
UpperCamelCase__ , UpperCamelCase__ , smooth_method=UpperCamelCase__ , smooth_value=UpperCamelCase__ , force=UpperCamelCase__ , lowercase=UpperCamelCase__ , use_effective_order=UpperCamelCase__ , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WikiSplit(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence"""),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""") , id="""references"""),
}) , codebase_urls=[
"""https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""",
"""https://github.com/cocoxu/simplification/blob/master/SARI.py""",
"""https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""",
"""https://github.com/mjpost/sacreBLEU""",
] , reference_urls=[
"""https://www.aclweb.org/anthology/Q16-1029.pdf""",
"""https://github.com/mjpost/sacreBLEU""",
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
    def _compute(self , sources , predictions , references):
        result = {}
        result.update({"""sari""": compute_sari(sources=sources , predictions=predictions , references=references )} )
        result.update({"""sacrebleu""": compute_sacrebleu(predictions=predictions , references=references )} )
        result.update({"""exact""": compute_em(predictions=predictions , references=references )} )
        return result | 283 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = tempfile.mkdtemp()
# fmt: off
__a : Tuple = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__a : List[Any] = dict(zip(__a , range(len(__a ) ) ) )
__a : List[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
__a : Dict = {'unk_token': '<unk>'}
__a : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__a ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__a ) )
__a : List[str] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48145466, 0.4578275, 0.40821073],
'image_std': [0.26862954, 0.26130258, 0.27577711],
}
__a : Optional[Any] = os.path.join(self.tmpdirname , __a )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(__a , __a )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__a )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        __a : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
__a : str = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.get_tokenizer()
__a : Optional[Any] = self.get_rust_tokenizer()
__a : Dict = self.get_image_processor()
__a : Any = CLIPSegProcessor(tokenizer=__a , image_processor=__a )
processor_slow.save_pretrained(self.tmpdirname )
__a : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
__a : Union[str, Any] = CLIPSegProcessor(tokenizer=__a , image_processor=__a )
processor_fast.save_pretrained(self.tmpdirname )
__a : Optional[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __a )
self.assertIsInstance(processor_fast.tokenizer , __a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __a )
self.assertIsInstance(processor_fast.image_processor , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__a : Any = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__a : int = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
__a : Union[str, Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.get_image_processor()
__a : Optional[int] = self.get_tokenizer()
__a : str = CLIPSegProcessor(tokenizer=__a , image_processor=__a )
__a : Dict = self.prepare_image_inputs()
__a : Tuple = image_processor(__a , return_tensors='np' )
__a : List[Any] = processor(images=__a , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.get_image_processor()
__a : List[str] = self.get_tokenizer()
__a : Optional[Any] = CLIPSegProcessor(tokenizer=__a , image_processor=__a )
__a : Optional[Any] = 'lower newer'
__a : Union[str, Any] = processor(text=__a )
__a : str = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.get_image_processor()
__a : Dict = self.get_tokenizer()
__a : List[Any] = CLIPSegProcessor(tokenizer=__a , image_processor=__a )
__a : List[str] = 'lower newer'
__a : Optional[int] = self.prepare_image_inputs()
__a : Tuple = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.get_image_processor()
__a : Dict = self.get_tokenizer()
__a : Tuple = CLIPSegProcessor(tokenizer=__a , image_processor=__a )
__a : Dict = self.prepare_image_inputs()
__a : List[str] = self.prepare_image_inputs()
__a : int = processor(images=__a , visual_prompt=__a )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'conditional_pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.get_image_processor()
__a : Union[str, Any] = self.get_tokenizer()
__a : Union[str, Any] = CLIPSegProcessor(tokenizer=__a , image_processor=__a )
__a : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a : Any = processor.batch_decode(__a )
__a : Optional[Any] = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
| 27 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Compute the circular convolution of two 1-D signals via a circulant matrix."""
    def __init__(self ) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution(self ) -> list[float]:
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
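# With the defaults above, [2, 1, 2, -1] circularly convolved with [1, 2, 3, 4] gives
# [10.0, 10.0, 6.0, 14.0]: each output sample is the dot product of the first signal
# with a rotation of the second.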
if __name__ == "__main__":
doctest.testmod()
| 333 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = '''realm'''
    def __init__( self , vocab_size=30522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3072 , hidden_act='''gelu_new''' , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1e-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=13353718 , searcher_beam_size=5000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
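# A default-configured RealmConfig can be instantiated and tweaked like any other
# transformers config, e.g. RealmConfig(num_candidates=4).reader_beam_size == 5.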
| 185 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ :Any = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :Optional[Any] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :str = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :List[str] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :str = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :List[Any] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 185 | 1 |
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6_37_81_37.0
AXIS_B = 6_35_67_52.31_42_45
RADIUS = 6_378_137
def haversine_distance(lat1: float , lon1: float , lat2: float , lon2: float ) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    phi_2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    lambda_1 = radians(lon1 )
    lambda_2 = radians(lon2 )
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2 )
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2 )
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1 ) * cos(phi_2 ) * sin_sq_lambda) )
    return 2 * RADIUS * asin(h_value )
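# Sanity check (coordinates are illustrative): San Francisco (37.774856, -122.424227) to
# Yosemite (37.864742, -119.537521) comes out to roughly 254 km with this formula.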
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 | """simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __A ( unittest.TestCase ):
    def setUp(self ):
        self.checkpoint = """ylacombe/bark-small"""
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = """en_speaker_1"""
        self.input_string = """This is a test string"""
        self.speaker_embeddings_dict_path = """speaker_embeddings_path.json"""
        self.speaker_embeddings_directory = """speaker_embeddings"""
    def get_tokenizer(self , **kwargs ):
        return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def tearDown(self ):
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default(self ):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
    def test_save_load_pretrained_additional_features(self ):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
    def test_speaker_embeddings(self ):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            """semantic_prompt""": np.ones(seq_len ),
            """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
            """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string , voice_preset=voice_preset )
        processed_voice_preset = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname , """file.npz""" )
        np.savez(tmpfilename , **voice_preset )
        inputs = processor(text=self.input_string , voice_preset=tmpfilename )
        processed_voice_preset = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string , voice_preset=self.voice_preset )
    def test_tokenizer(self ):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        encoded_tok = tokenizer(
            self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 44 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the command line options for the TPU launch helper."""
    parser = ArgumentParser(
        description=(
            '''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('''--num_cores''' , type=int , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
    # positional
    parser.add_argument(
        '''training_script''' , type=str , help=(
            '''The full path to the single TPU training '''
            '''program/script to be launched in parallel, '''
            '''followed by all the arguments for the '''
            '''training script'''
        ) , )
    # rest from the training program
    parser.add_argument('''training_script_args''' , nargs=REMAINDER )
    return parser.parse_args()
def A ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = parse_args()
# Import training_script as a module.
UpperCAmelCase_ = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
UpperCAmelCase_ = script_fpath.stem
UpperCAmelCase_ = importlib.import_module(__UpperCAmelCase )
# Patch sys.argv
UpperCAmelCase_ = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
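# A hypothetical invocation of this launcher (the script name and flags below are
# placeholders, not part of this file):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --learning_rate 3e-5
#
# The launcher imports the training script as a module, rewrites sys.argv to
# append --tpu_num_cores, and hands the module's _mp_fn to xmp.spawn so it runs
# once per TPU core.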
| 344 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[Any] =(DPMSolverSinglestepScheduler,)
UpperCamelCase__ : Tuple =(("num_inference_steps", 25),)
def __a ( self :List[Any] , **_lowercase :Optional[Any]) -> int:
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf'''),
'''variance_type''': None,
}
config.update(**_lowercase)
return config
def __a ( self :Union[str, Any] , _lowercase :List[Any]=0 , **_lowercase :Optional[int]) -> List[Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ , UpperCAmelCase_ = sample, sample
for t in range(_lowercase , time_step + scheduler.config.solver_order + 1):
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Union[str, Any]) -> List[Any]:
pass
def __a ( self :Optional[Any] , _lowercase :str=0 , **_lowercase :Union[str, Any]) -> Dict:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Dict , _lowercase :Union[str, Any]=None , **_lowercase :List[Any]) -> int:
if scheduler is None:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
return sample
def __a ( self :int) -> Tuple:
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = 50
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
        # make sure that the first t is odd
for i, t in enumerate(scheduler.timesteps[3:]):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_574) < 1E-3
def __a ( self :List[Any]) -> List[Any]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :int) -> Optional[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Tuple) -> int:
self.check_over_configs(thresholding=_lowercase)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , algorithm_type='''dpmsolver++''' , solver_order=_lowercase , solver_type=_lowercase , )
def __a ( self :List[Any]) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Optional[int]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
UpperCAmelCase_ = self.full_loop(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
assert not torch.isnan(_lowercase).any(), "Samples have nan numbers"
def __a ( self :Tuple) -> int:
self.check_over_configs(lower_order_final=_lowercase)
self.check_over_configs(lower_order_final=_lowercase)
def __a ( self :Tuple) -> Optional[Any]:
self.check_over_configs(lambda_min_clipped=-float('''inf'''))
self.check_over_configs(lambda_min_clipped=-5.1)
def __a ( self :Any) -> List[str]:
self.check_over_configs(variance_type=_lowercase)
self.check_over_configs(variance_type='''learned_range''')
def __a ( self :Any) -> Dict:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowercase , time_step=0)
def __a ( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Any) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_248) < 1E-3
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.1_453) < 1E-3
def __a ( self :List[Any]) -> Dict:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.0_649) < 1E-3
def __a ( self :Any) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(thresholding=_lowercase , dynamic_thresholding_ratio=0)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
assert sample.dtype == torch.floataa
| 344 | 1 |
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowerCamelCase__ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int]="shi-labs/oneformer_demo" ) -> Union[str, Any]:
with open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type='dataset' ) , 'r' ) as f:
lowerCamelCase_ = json.load(_lowerCamelCase )
lowerCamelCase_ = {}
lowerCamelCase_ = []
lowerCamelCase_ = []
for key, info in class_info.items():
lowerCamelCase_ = info['name']
class_names.append(info['name'] )
if info["isthing"]:
thing_ids.append(int(_lowerCamelCase ) )
lowerCamelCase_ = thing_ids
lowerCamelCase_ = class_names
return metadata
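# A minimal sketch (with illustrative ids/names, not the real ade20k_panoptic.json)
# of the class-info JSON this helper expects:
#
#   {"0": {"name": "wall", "isthing": 0}, "7": {"name": "bed", "isthing": 1}}
#
# which would yield metadata mapping each id to its name, plus class_names
# ["wall", "bed"] and thing_ids [7] (only the "isthing" entries).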
class a ( unittest.TestCase ):
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int]=7 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : Optional[Any]=30 , __SCREAMING_SNAKE_CASE : Union[str, Any]=400 , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[Any]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Dict=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Dict=10 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : List[str]=255 , __SCREAMING_SNAKE_CASE : Optional[Any]="shi-labs/oneformer_demo" , __SCREAMING_SNAKE_CASE : Tuple="ade20k_panoptic.json" , __SCREAMING_SNAKE_CASE : Dict=10 , ) -> Optional[Any]:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = min_resolution
lowerCamelCase_ = max_resolution
lowerCamelCase_ = do_resize
lowerCamelCase_ = {'shortest_edge': 32, 'longest_edge': 1333} if size is None else size
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean
lowerCamelCase_ = image_std
lowerCamelCase_ = class_info_file
lowerCamelCase_ = prepare_metadata(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = num_text
lowerCamelCase_ = repo_path
# for the post_process_functions
lowerCamelCase_ = 2
lowerCamelCase_ = 10
lowerCamelCase_ = 10
lowerCamelCase_ = 3
lowerCamelCase_ = 4
lowerCamelCase_ = num_labels
lowerCamelCase_ = do_reduce_labels
lowerCamelCase_ = ignore_index
def UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ) -> List[str]:
if not batched:
lowerCamelCase_ = image_inputs[0]
if isinstance(__SCREAMING_SNAKE_CASE , Image.Image ):
lowerCamelCase_ , lowerCamelCase_ = image.size
else:
lowerCamelCase_ , lowerCamelCase_ = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase_ = int(self.size['shortest_edge'] * h / w )
lowerCamelCase_ = self.size['shortest_edge']
elif w > h:
lowerCamelCase_ = self.size['shortest_edge']
lowerCamelCase_ = int(self.size['shortest_edge'] * w / h )
else:
lowerCamelCase_ = self.size['shortest_edge']
lowerCamelCase_ = self.size['shortest_edge']
else:
lowerCamelCase_ = []
for image in image_inputs:
lowerCamelCase_ , lowerCamelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase_ = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[0] )[0]
lowerCamelCase_ = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
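    # Worked example for the resize rule above: a 400 x 600 (h x w) image with
    # shortest_edge=32 keeps its aspect ratio and maps to height 32 and width
    # int(32 * 600 / 400) = 48; the batched branch then returns the per-batch maxima.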
def UpperCamelCase ( self : List[Any] ) -> Any:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class a ( __snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE : Any = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
SCREAMING_SNAKE_CASE : int = image_processing_class
def UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
lowerCamelCase_ = OneFormerImageProcessorTester(self )
@property
def UpperCamelCase ( self : Optional[int] ) -> Any:
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCamelCase ( self : str ) -> Dict:
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'ignore_index' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'class_info_file' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'num_text' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'repo_path' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'metadata' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , 'do_reduce_labels' ) )
def UpperCamelCase ( self : Optional[Any] ) -> Dict:
pass
def UpperCamelCase ( self : Tuple ) -> List[Any]:
# Initialize image_processor
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ = prepare_image_inputs(self.image_processing_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
lowerCamelCase_ = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processing_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ , lowerCamelCase_ = self.image_processing_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = image_processor(
__SCREAMING_SNAKE_CASE , ['semantic'] * len(__SCREAMING_SNAKE_CASE ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
# Initialize image_processor
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processing_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
lowerCamelCase_ = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processing_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ , lowerCamelCase_ = self.image_processing_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = image_processor(
__SCREAMING_SNAKE_CASE , ['semantic'] * len(__SCREAMING_SNAKE_CASE ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self : str ) -> Any:
# Initialize image_processor
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processing_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
lowerCamelCase_ = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processing_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ , lowerCamelCase_ = self.image_processing_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = image_processor(
__SCREAMING_SNAKE_CASE , ['semantic'] * len(__SCREAMING_SNAKE_CASE ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : Dict="np" ) -> Any:
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
lowerCamelCase_ = self.image_processing_tester.num_labels
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = prepare_image_inputs(self.image_processing_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
if with_segmentation_maps:
lowerCamelCase_ = num_labels
if is_instance_map:
lowerCamelCase_ = list(range(__SCREAMING_SNAKE_CASE ) ) * 2
lowerCamelCase_ = dict(enumerate(__SCREAMING_SNAKE_CASE ) )
lowerCamelCase_ = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
lowerCamelCase_ = [Image.fromarray(__SCREAMING_SNAKE_CASE ) for annotation in annotations]
lowerCamelCase_ = image_processor(
__SCREAMING_SNAKE_CASE , ['semantic'] * len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , return_tensors='pt' , instance_id_to_semantic_id=__SCREAMING_SNAKE_CASE , pad_and_return_pixel_mask=__SCREAMING_SNAKE_CASE , )
return inputs
def UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
pass
def UpperCamelCase ( self : Optional[Any] ) -> Any:
def common(__SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : Optional[int]=None ):
lowerCamelCase_ = self.comm_get_image_processor_inputs(
with_segmentation_maps=__SCREAMING_SNAKE_CASE , is_instance_map=__SCREAMING_SNAKE_CASE , segmentation_type=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = inputs['mask_labels']
lowerCamelCase_ = inputs['class_labels']
lowerCamelCase_ = inputs['pixel_values']
lowerCamelCase_ = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=__SCREAMING_SNAKE_CASE )
common(is_instance_map=__SCREAMING_SNAKE_CASE , segmentation_type='pil' )
common(is_instance_map=__SCREAMING_SNAKE_CASE , segmentation_type='pil' )
def UpperCamelCase ( self : str ) -> List[str]:
lowerCamelCase_ = np.zeros((20, 50) )
lowerCamelCase_ = 1
lowerCamelCase_ = 1
lowerCamelCase_ = 1
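        # binary_mask_to_rle encodes the flattened mask as [start_1, length_1,
        # start_2, length_2, ...] with 1-indexed starts, so the length-4 result
        # checked below means exactly two foreground runs, the first starting at
        # pixel 21 and spanning 45 pixels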
lowerCamelCase_ = binary_mask_to_rle(__SCREAMING_SNAKE_CASE )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def UpperCamelCase ( self : str ) -> Optional[Any]:
lowerCamelCase_ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
lowerCamelCase_ = self.image_processing_tester.get_fake_oneformer_outputs()
        lowerCamelCase_ = feature_extractor.post_process_semantic_segmentation(__SCREAMING_SNAKE_CASE )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
lowerCamelCase_ = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        lowerCamelCase_ = feature_extractor.post_process_semantic_segmentation(__SCREAMING_SNAKE_CASE , target_sizes=__SCREAMING_SNAKE_CASE )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCamelCase ( self : List[str] ) -> Optional[int]:
lowerCamelCase_ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
lowerCamelCase_ = self.image_processing_tester.get_fake_oneformer_outputs()
lowerCamelCase_ = image_processor.post_process_instance_segmentation(__SCREAMING_SNAKE_CASE , threshold=0 )
self.assertTrue(len(__SCREAMING_SNAKE_CASE ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
lowerCamelCase_ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
lowerCamelCase_ = self.image_processing_tester.get_fake_oneformer_outputs()
lowerCamelCase_ = image_processor.post_process_panoptic_segmentation(__SCREAMING_SNAKE_CASE , threshold=0 )
self.assertTrue(len(__SCREAMING_SNAKE_CASE ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 183 |
"""simple docstring"""
import string
def lowerCamelCase__ ( _lowerCamelCase : str ) -> None:
for key in range(len(string.ascii_uppercase ) ):
lowerCamelCase_ = ''
for symbol in message:
if symbol in string.ascii_uppercase:
lowerCamelCase_ = string.ascii_uppercase.find(_lowerCamelCase )
lowerCamelCase_ = num - key
if num < 0:
lowerCamelCase_ = num + len(string.ascii_uppercase )
lowerCamelCase_ = translated + string.ascii_uppercase[num]
else:
lowerCamelCase_ = translated + symbol
print(F'''Decryption using Key #{key}: {translated}''' )
def lowerCamelCase__ ( ) -> None:
lowerCamelCase_ = input('Encrypted message: ' )
lowerCamelCase_ = message.upper()
decrypt(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
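# Worked example: the loop above prints all 26 candidate shifts, so a Caesar
# ciphertext such as "KHOOR" ("HELLO" encrypted with key 3) appears as readable
# plaintext on the output line labelled Key #3.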
| 183 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
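# A minimal usage sketch of the function above (argument order follows the
# voltage/current/resistance signature; `ohms_law` stands in for the obfuscated
# name): exactly one argument must be 0, and the missing quantity is solved for:
#
#   ohms_law(voltage=10, current=5, resistance=0)  # -> {"resistance": 2.0}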
| 351 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
a =logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> None:
__lowerCamelCase : Tuple = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCamelCase__ ) == len(lowerCamelCase__ ), F"{len(lowerCamelCase__ )} != {len(lowerCamelCase__ )}"
dest_layers.load_state_dict(layers_to_copy.state_dict() )
a ={
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
a ={
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> str:
try:
__lowerCamelCase : List[str] = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
F" {n_student}" )
return list(range(lowerCamelCase__ ) )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[int]:
if n_student > n_teacher:
raise ValueError(F"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
elif n_teacher == n_student:
return list(range(lowerCamelCase__ ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
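# Worked example from the tables above: distilling a 12-layer teacher into a
# 3-layer student copies teacher layers [0, 6, 11] (LAYERS_TO_COPY[12][3]) and,
# for intermediate supervision, matches the student against teacher layers
# [3, 7, 11] (LAYERS_TO_SUPERVISE[12][3]).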
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ = "student" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__=False , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
__lowerCamelCase : int = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
AutoTokenizer.from_pretrained(lowerCamelCase__ ).save_pretrained(lowerCamelCase__ ) # purely for convenience
__lowerCamelCase : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ ).eval()
else:
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), F"teacher must be a model or string got type {type(lowerCamelCase__ )}"
__lowerCamelCase : str = teacher.config.to_diff_dict()
try:
__lowerCamelCase , __lowerCamelCase : Dict = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__lowerCamelCase : Optional[int] = teacher_e
if d is None:
__lowerCamelCase : Optional[Any] = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
__lowerCamelCase , __lowerCamelCase : int = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__lowerCamelCase , __lowerCamelCase : Any = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__lowerCamelCase : Union[str, Any] = teacher_e
if d is None:
__lowerCamelCase : Any = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCamelCase__ )
# Copy weights
__lowerCamelCase : str = teacher.config_class(**lowerCamelCase__ )
__lowerCamelCase : Union[str, Any] = AutoModelForSeqaSeqLM.from_config(lowerCamelCase__ )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
__lowerCamelCase : Tuple = student.load_state_dict(teacher.state_dict() , strict=lowerCamelCase__ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__lowerCamelCase , __lowerCamelCase : Optional[Any] = list(range(lowerCamelCase__ ) ), list(range(lowerCamelCase__ ) )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
F" {save_path}" )
student.save_pretrained(lowerCamelCase__ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__lowerCamelCase : List[int] = pick_layers_to_copy(lowerCamelCase__ , lowerCamelCase__ )
if d_layers_to_copy is None:
__lowerCamelCase : List[int] = pick_layers_to_copy(lowerCamelCase__ , lowerCamelCase__ )
try:
if hasattr(
lowerCamelCase__ , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCamelCase__ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCamelCase__ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCamelCase__ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCamelCase__ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCamelCase__ )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCamelCase__ )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
__lowerCamelCase : Dict = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(lowerCamelCase__ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 113 | 0 |
from ..utils import DummyObject, requires_backends
class _lowercase ( metaclass=A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = ['''flax''', '''transformers''']
def __init__( self :Tuple , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Optional[Any] ) -> Dict:
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def __magic_name__( cls :List[Any] , *lowerCAmelCase__ :Any , **lowerCAmelCase__ :Union[str, Any] ) -> Any:
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def __magic_name__( cls :Union[str, Any] , *lowerCAmelCase__ :List[Any] , **lowerCAmelCase__ :str ) -> Any:
requires_backends(cls , ['''flax''', '''transformers'''] )
class _lowercase ( metaclass=A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''flax''', '''transformers''']
def __init__( self :List[str] , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :List[Any] ) -> Optional[int]:
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def __magic_name__( cls :Any , *lowerCAmelCase__ :str , **lowerCAmelCase__ :str ) -> Dict:
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def __magic_name__( cls :str , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :List[str] ) -> Tuple:
requires_backends(cls , ['''flax''', '''transformers'''] )
class _lowercase ( metaclass=A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ['''flax''', '''transformers''']
def __init__( self :Any , *lowerCAmelCase__ :List[str] , **lowerCAmelCase__ :List[str] ) -> int:
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def __magic_name__( cls :Optional[Any] , *lowerCAmelCase__ :str , **lowerCAmelCase__ :int ) -> Optional[Any]:
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def __magic_name__( cls :Dict , *lowerCAmelCase__ :List[Any] , **lowerCAmelCase__ :Tuple ) -> Dict:
requires_backends(cls , ['''flax''', '''transformers'''] )
class _lowercase ( metaclass=A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ['''flax''', '''transformers''']
def __init__( self :Dict , *lowerCAmelCase__ :Dict , **lowerCAmelCase__ :List[str] ) -> Tuple:
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def __magic_name__( cls :Tuple , *lowerCAmelCase__ :int , **lowerCAmelCase__ :int ) -> List[Any]:
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def __magic_name__( cls :Tuple , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :List[str] ) -> str:
requires_backends(cls , ['''flax''', '''transformers'''] )
| 9 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : List[str] = (DDIMParallelScheduler,)
__lowerCamelCase : int = (("eta", 0.0), ("num_inference_steps", 50))
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
A : int = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""clip_sample""": True,
}
config.update(**lowerCamelCase__ )
return config
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
A : Dict = self.scheduler_classes[0]
A : str = self.get_scheduler_config(**lowerCamelCase__ )
A : Tuple = scheduler_class(**lowerCamelCase__ )
A , A : Tuple = 10, 0.0
A : Optional[int] = self.dummy_model()
A : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for t in scheduler.timesteps:
A : Optional[int] = model(lowerCamelCase__, lowerCamelCase__ )
A : str = scheduler.step(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ).prev_sample
return sample
def _lowerCAmelCase ( self ):
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCamelCase__ )
A : Tuple = self.scheduler_classes[0]
A : Optional[Any] = self.get_scheduler_config(steps_offset=1 )
A : List[Any] = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(5 )
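        # with 1000 train timesteps and 5 inference steps the stride is 200, so the
        # base timesteps are [800, 600, 400, 200, 0]; steps_offset=1 shifts each up
        # by one, hence the expected [801, 601, 401, 201, 1]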
assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1] ) )
def _lowerCAmelCase ( self ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCamelCase__, beta_end=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
self.check_over_configs(thresholding=lowerCamelCase__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCamelCase__, prediction_type=lowerCamelCase__, sample_max_value=lowerCamelCase__, )
def _lowerCAmelCase ( self ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500] ):
self.check_over_forward(time_step=lowerCamelCase__, num_inference_steps=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=lowerCamelCase__, eta=lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[str] = self.scheduler_classes[0]
A : List[str] = self.get_scheduler_config()
A : Dict = scheduler_class(**lowerCamelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0, 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420, 400 ) - 0.1_4771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980, 960 ) - 0.3_2460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0, 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487, 486 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999, 998 ) - 0.02 ) ) < 1e-5
def _lowerCAmelCase ( self ):
A : int = self.scheduler_classes[0]
A : Any = self.get_scheduler_config()
A : Union[str, Any] = scheduler_class(**lowerCamelCase__ )
A , A : List[str] = 10, 0.0
scheduler.set_timesteps(lowerCamelCase__ )
A : Any = self.dummy_model()
A : Dict = self.dummy_sample_deter
A : Dict = self.dummy_sample_deter + 0.1
A : Tuple = self.dummy_sample_deter - 0.1
A : Tuple = samplea.shape[0]
A : List[Any] = torch.stack([samplea, samplea, samplea], dim=0 )
A : str = torch.arange(lowerCamelCase__ )[0:3, None].repeat(1, lowerCamelCase__ )
A : Any = model(samples.flatten(0, 1 ), timesteps.flatten(0, 1 ) )
A : Any = scheduler.batch_step_no_noise(lowerCamelCase__, timesteps.flatten(0, 1 ), samples.flatten(0, 1 ), lowerCamelCase__ )
A : List[Any] = torch.sum(torch.abs(lowerCamelCase__ ) )
A : Optional[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 1147.7904 ) < 1e-2
assert abs(result_mean.item() - 0.4982 ) < 1e-3
def _lowerCAmelCase ( self ):
A : Dict = self.full_loop()
A : str = torch.sum(torch.abs(lowerCamelCase__ ) )
A : Optional[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 172.0067 ) < 1e-2
assert abs(result_mean.item() - 0.22_3967 ) < 1e-3
def _lowerCAmelCase ( self ):
A : str = self.full_loop(prediction_type="""v_prediction""" )
A : Any = torch.sum(torch.abs(lowerCamelCase__ ) )
A : Optional[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 52.5302 ) < 1e-2
assert abs(result_mean.item() - 0.0684 ) < 1e-3
def _lowerCAmelCase ( self ):
# We specify different beta, so that the first alpha is 0.99
A : Any = self.full_loop(set_alpha_to_one=lowerCamelCase__, beta_start=0.01 )
A : Dict = torch.sum(torch.abs(lowerCamelCase__ ) )
A : str = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 149.8295 ) < 1e-2
assert abs(result_mean.item() - 0.1951 ) < 1e-3
def _lowerCAmelCase ( self ):
# We specify different beta, so that the first alpha is 0.99
A : int = self.full_loop(set_alpha_to_one=lowerCamelCase__, beta_start=0.01 )
A : List[Any] = torch.sum(torch.abs(lowerCamelCase__ ) )
A : List[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 149.0784 ) < 1e-2
assert abs(result_mean.item() - 0.1941 ) < 1e-3
| 116 | 0 |
"""simple docstring"""
import sys
lowercase__ = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def _snake_case ( lowercase__ = N ):
_lowerCamelCase : Optional[Any] = -sys.maxsize - 1
for i in range(len(snake_case__ ) - 12 ):
_lowerCamelCase : Any = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
_lowerCamelCase : Optional[Any] = product
return largest_product
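# A minimal, self-contained sketch of the same sliding-window idea with a
# configurable window size (this helper is illustrative, not part of the
# original solution):
def _window_product_demo(digits: str, window: int) -> int:
    best = 0
    for start in range(len(digits) - window + 1):
        # recompute the product of each window of `window` adjacent digits
        product = 1
        for ch in digits[start : start + window]:
            product *= int(ch)
        best = max(best, product)
    return best


assert _window_product_demo("12345", 2) == 20  # 4 * 5 is the largest adjacent pair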
if __name__ == "__main__":
print(F"{solution() = }")
| 364 |
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowercase__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
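# The custom TableFormat above disables tabulate's horizontal rules so tables are
# emitted as bare pipe-separated rows, which render cleanly inside the Slack code
# blocks assembled further down.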
lowercase__ = []
lowercase__ = []
lowercase__ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
lowercase__ = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
"""emoji""": True,
},
}
]
lowercase__ = 0
for log in Path().glob("""*.log"""):
lowercase__ = 0
with open(log, """r""") as f:
for line in f:
lowercase__ = json.loads(line)
if line.get("""nodeid""", """""") != "":
lowercase__ = line["""nodeid"""]
if line.get("""duration""", None) is not None:
lowercase__ = F"{line['duration']:.4f}"
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowercase__ = []
log.unlink()
lowercase__ = """"""
lowercase__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
lowercase__ = []
lowercase__ = {}
for test in failed_tests:
lowercase__ = test[0].split("""::""")
lowercase__ = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
lowercase__ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowercase__ = [test[0] for test in failed_table]
lowercase__ = list(set(files))
# Count number of instances in failed_tests
lowercase__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowercase__ = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
lowercase__ = """Too many failed tests, please see the full report in the Action results."""
lowercase__ = len(err) + 10
lowercase__ = message[: 3000 - offset] + F"\n...\n```\n{err}"
print(F"### {message}")
else:
lowercase__ = """No failed tests! 🤗"""
print(F"## {message}")
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
lowercase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
lowercase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
lowercase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
payload.append(action_button)
lowercase__ = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
}
],
}
payload.append(date_report)
lowercase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
lowercase__ = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowercase__ = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowercase__ = row[0]
else:
lowercase__ = """"""
lowercase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
        )
| 12 | 0 |
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
lowerCamelCase_ = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
lowerCamelCase_ = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def __lowercase ( __lowercase ) -> Optional[Any]:
'''simple docstring'''
_A = numpy.dtype(numpy.uintaa ).newbyteorder(">" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=__lowercase )[0]
@deprecated(__lowercase , "Please use tf.data to implement this functionality." )
def __lowercase ( __lowercase ) -> List[Any]:
'''simple docstring'''
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=__lowercase ) as bytestream:
_A = _readaa(__lowercase )
if magic != 2051:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
_A = _readaa(__lowercase )
_A = _readaa(__lowercase )
_A = _readaa(__lowercase )
_A = bytestream.read(rows * cols * num_images )
_A = numpy.frombuffer(__lowercase , dtype=numpy.uinta )
_A = data.reshape(__lowercase , __lowercase , __lowercase , 1 )
return data
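# For reference, the big-endian IDX image layout parsed above: a uint32 magic
# number (2051), then num_images, rows and cols as uint32, followed by
# rows * cols * num_images unsigned bytes of pixel data.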
@deprecated(__lowercase , "Please use tf.one_hot on tensors." )
def __lowercase ( __lowercase , __lowercase ) -> int:
'''simple docstring'''
_A = labels_dense.shape[0]
_A = numpy.arange(__lowercase ) * num_classes
_A = numpy.zeros((num_labels, num_classes) )
_A = 1
return labels_one_hot
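# Worked example for the one-hot conversion above: labels [0, 2] with
# num_classes=3 become [[1, 0, 0], [0, 0, 1]] -- each row carries a single 1 in
# the column given by the original dense label.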
@deprecated(__lowercase , "Please use tf.data to implement this functionality." )
def __lowercase ( __lowercase , __lowercase=False , __lowercase=10 ) -> List[Any]:
'''simple docstring'''
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=__lowercase ) as bytestream:
_A = _readaa(__lowercase )
if magic != 2049:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
_A = _readaa(__lowercase )
_A = bytestream.read(__lowercase )
_A = numpy.frombuffer(__lowercase , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(__lowercase , __lowercase )
return labels
class _UpperCAmelCase :
"""simple docstring"""
@deprecated(
__UpperCAmelCase , "Please use alternatives such as official/mnist/_DataSet.py"
" from tensorflow/models." , )
def __init__( self : str , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : Optional[Any]=dtypes.floataa , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Optional[int]=None , ):
'''simple docstring'''
_A , _A = random_seed.get_seed(__UpperCAmelCase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
_A = dtypes.as_dtype(__UpperCAmelCase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
if fake_data:
_A = 10000
_A = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
_A = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
_A = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
_A = images.astype(numpy.floataa )
_A = numpy.multiply(__UpperCAmelCase , 1.0 / 255.0 )
_A = images
_A = labels
_A = 0
_A = 0
@property
def lowerCAmelCase ( self : int ):
'''simple docstring'''
return self._images
@property
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return self._labels
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return self._num_examples
@property
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return self._epochs_completed
def lowerCAmelCase ( self : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : Any=False , __UpperCAmelCase : int=True ):
'''simple docstring'''
if fake_data:
_A = [1] * 784
_A = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__UpperCAmelCase )],
[fake_label for _ in range(__UpperCAmelCase )],
)
_A = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
_A = numpy.arange(self._num_examples )
numpy.random.shuffle(__UpperCAmelCase )
_A = self.images[perma]
_A = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
_A = self._num_examples - start
_A = self._images[start : self._num_examples]
_A = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
_A = numpy.arange(self._num_examples )
numpy.random.shuffle(__UpperCAmelCase )
_A = self.images[perm]
_A = self.labels[perm]
# Start next epoch
_A = 0
_A = batch_size - rest_num_examples
_A = self._index_in_epoch
_A = self._images[start:end]
_A = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
_A = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
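    # Example of the epoch wraparound above: with 10 examples and batch_size=4,
    # the third call returns the last 2 examples of the current epoch concatenated
    # with the first 2 of the freshly reshuffled next epoch.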
@deprecated(__lowercase , "Please write your own downloading logic." )
def __lowercase ( __lowercase , __lowercase , __lowercase ) -> Union[str, Any]:
'''simple docstring'''
if not gfile.Exists(__lowercase ):
gfile.MakeDirs(__lowercase )
_A = os.path.join(__lowercase , __lowercase )
if not gfile.Exists(__lowercase ):
urllib.request.urlretrieve(__lowercase , __lowercase ) # noqa: S310
with gfile.GFile(__lowercase ) as f:
_A = f.size()
print("Successfully downloaded" , __lowercase , __lowercase , "bytes." )
return filepath
@deprecated(
__lowercase , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def __lowercase ( __lowercase , __lowercase=False , __lowercase=False , __lowercase=dtypes.floataa , __lowercase=True , __lowercase=5000 , __lowercase=None , __lowercase=DEFAULT_SOURCE_URL , ) -> List[str]:
'''simple docstring'''
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=__lowercase , one_hot=__lowercase , dtype=__lowercase , seed=__lowercase )
_A = fake()
_A = fake()
_A = fake()
return _Datasets(train=__lowercase , validation=__lowercase , test=__lowercase )
if not source_url: # empty string check
_A = DEFAULT_SOURCE_URL
_A = "train-images-idx3-ubyte.gz"
_A = "train-labels-idx1-ubyte.gz"
_A = "t10k-images-idx3-ubyte.gz"
_A = "t10k-labels-idx1-ubyte.gz"
_A = _maybe_download(
__lowercase , __lowercase , source_url + train_images_file )
with gfile.Open(__lowercase , "rb" ) as f:
_A = _extract_images(__lowercase )
_A = _maybe_download(
__lowercase , __lowercase , source_url + train_labels_file )
with gfile.Open(__lowercase , "rb" ) as f:
_A = _extract_labels(__lowercase , one_hot=__lowercase )
_A = _maybe_download(
__lowercase , __lowercase , source_url + test_images_file )
with gfile.Open(__lowercase , "rb" ) as f:
_A = _extract_images(__lowercase )
_A = _maybe_download(
__lowercase , __lowercase , source_url + test_labels_file )
with gfile.Open(__lowercase , "rb" ) as f:
_A = _extract_labels(__lowercase , one_hot=__lowercase )
if not 0 <= validation_size <= len(__lowercase ):
_A = (
"Validation size should be between 0 and "
F'''{len(__lowercase )}. Received: {validation_size}.'''
)
raise ValueError(__lowercase )
_A = train_images[:validation_size]
_A = train_labels[:validation_size]
_A = train_images[validation_size:]
_A = train_labels[validation_size:]
_A = {"dtype": dtype, "reshape": reshape, "seed": seed}
_A = _DataSet(__lowercase , __lowercase , **__lowercase )
_A = _DataSet(__lowercase , __lowercase , **__lowercase )
_A = _DataSet(__lowercase , __lowercase , **__lowercase )
return _Datasets(train=__lowercase , validation=__lowercase , test=__lowercase )
| 79 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
__snake_case = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
__snake_case = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
    Example 2-The same as Example 1, but also returning the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
__snake_case = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ):
'''simple docstring'''
if return_pvalue:
UpperCamelCase__ :Any = pearsonr(UpperCamelCase_ , UpperCamelCase_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCamelCase_ , UpperCamelCase_ )[0] )} | 97 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Any = {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''',
}
class snake_case__ ( A__ ):
A__ = 'gpt_neox_japanese'
def __init__( self : Dict , __a : List[str]=32000 , __a : List[str]=2560 , __a : Optional[Any]=32 , __a : int=32 , __a : List[str]=4 , __a : Union[str, Any]="gelu" , __a : Optional[Any]=1.0_0 , __a : str=10000 , __a : Dict=2048 , __a : str=0.0_2 , __a : Tuple=1e-5 , __a : Optional[int]=True , __a : Union[str, Any]=31996 , __a : Optional[Any]=31999 , __a : Optional[Any]=0.1 , __a : Union[str, Any]=0.0 , **__a : List[str] , ) -> str:
'''simple docstring'''
super().__init__(bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
__snake_case : List[str] = vocab_size
__snake_case : Optional[Any] = max_position_embeddings
__snake_case : List[Any] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
__snake_case : Dict = intermediate_multiple_size
__snake_case : Optional[Any] = hidden_act
__snake_case : Any = rotary_pct
__snake_case : int = rotary_emb_base
__snake_case : Optional[Any] = initializer_range
__snake_case : List[Any] = layer_norm_eps
__snake_case : Optional[int] = use_cache
__snake_case : Union[str, Any] = attention_dropout
__snake_case : List[Any] = hidden_dropout
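# Hedged instantiation sketch via the public transformers API that the class
# above mirrors; the override values are illustrative, not from the original.
from transformers import GPTNeoXJapaneseConfig

config = GPTNeoXJapaneseConfig(vocab_size=32000, hidden_size=2560)
print(config.model_type)  # gpt_neox_japanese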
| 356 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : Optional[Any] = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Dict = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
A__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
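# Hedged sketch of what the _LazyModule indirection buys: importing one symbol
# defers loading the heavy modeling code until first attribute access.
from transformers.models.nllb_moe import NllbMoeConfig  # resolved lazily

config = NllbMoeConfig()
print(type(config).__name__)  # NllbMoeConfig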
| 0 | 0 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
A__ = tmp_path / '''file.csv'''
A__ = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''' )
with open(lowercase_ , '''w''' ) as f:
f.write(lowercase_ )
return str(lowercase_ )
@pytest.fixture
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
A__ = tmp_path / '''malformed_file.csv'''
A__ = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''' )
with open(lowercase_ , '''w''' ) as f:
f.write(lowercase_ )
return str(lowercase_ )
@pytest.fixture
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
"""simple docstring"""
A__ = tmp_path / '''csv_with_image.csv'''
A__ = textwrap.dedent(
f"""\
image
{image_file}
""" )
with open(lowercase_ , '''w''' ) as f:
f.write(lowercase_ )
return str(lowercase_ )
@pytest.fixture
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
"""simple docstring"""
A__ = tmp_path / '''csv_with_label.csv'''
A__ = textwrap.dedent(
'''\
label
good
bad
good
''' )
with open(lowercase_ , '''w''' ) as f:
f.write(lowercase_ )
return str(lowercase_ )
@pytest.fixture
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
"""simple docstring"""
A__ = tmp_path / '''csv_with_int_list.csv'''
A__ = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''' )
with open(lowercase_ , '''w''' ) as f:
f.write(lowercase_ )
return str(lowercase_ )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Dict:
"""simple docstring"""
A__ = Csv()
A__ = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(lowercase_ , match='''Error tokenizing data''' ):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(lowercase_ ) in record.message
for record in caplog.records )
@require_pil
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
with open(lowercase_ , encoding='''utf-8''' ) as f:
A__ = f.read().splitlines()[1]
A__ = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
A__ = csv._generate_tables([[csv_file_with_image]] )
A__ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''image''' ).type == Image()()
A__ = pa_table.to_pydict()['''image''']
assert generated_content == [{"path": image_file, "bytes": None}]
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
"""simple docstring"""
with open(lowercase_ , encoding='''utf-8''' ) as f:
A__ = f.read().splitlines()[1:]
A__ = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
A__ = csv._generate_tables([[csv_file_with_label]] )
A__ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
A__ = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).str2int(label) for label in labels]
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
"""simple docstring"""
    A__ = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda x: [int(i) for i in x.split()]} )
A__ = csv._generate_tables([[csv_file_with_int_list]] )
A__ = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
A__ = pa_table.to_pydict()['''int_list''']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
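# Hedged sketch of the converters behaviour the last test exercises, shown with
# pandas directly (datasets' Csv builder forwards these kwargs to pandas.read_csv):
import io

import pandas as pd

csv_text = "int_list\n1 2 3\n4 5 6\n7 8 9\n"
df = pd.read_csv(io.StringIO(csv_text), converters={"int_list": lambda x: [int(i) for i in x.split()]})
print(df["int_list"].tolist())  # [[1, 2, 3], [4, 5, 6], [7, 8, 9]]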
| 14 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
a_ = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =[
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self , **UpperCamelCase_ ) -> Optional[Any]:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__lowercase : Union[str, Any] = deprecated_arg[3:]
setattr(self , UpperCamelCase_ , not kwargs.pop(UpperCamelCase_ ) )
logger.warning(
F"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
F""" {positive_arg}={kwargs[positive_arg]}""" )
__lowercase : Dict = kwargs.pop('''torchscript''' , self.torchscript )
__lowercase : str = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
__lowercase : str = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
super().__init__(**UpperCamelCase_ )
UpperCamelCase =field(default=snake_case , metadata={"help": "Trace the models using torchscript"} )
UpperCamelCase =field(default=snake_case , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
UpperCamelCase =field(
default="O1" , metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
"See details at https://nvidia.github.io/apex/amp.html"
)
} , )
@cached_property
def _lowerCamelCase ( self ) -> Tuple["torch.device", int]:
requires_backends(self , ['''torch'''] )
logger.info('''PyTorch: setting up devices''' )
if not self.cuda:
__lowercase : str = torch.device('''cpu''' )
__lowercase : Optional[Any] = 0
elif is_torch_tpu_available():
__lowercase : str = xm.xla_device()
__lowercase : Any = 0
else:
__lowercase : List[str] = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
__lowercase : Any = torch.cuda.device_count()
return device, n_gpu
@property
def _lowerCamelCase ( self ) -> Union[str, Any]:
return is_torch_tpu_available() and self.tpu
@property
def _lowerCamelCase ( self ) -> int:
requires_backends(self , ['''torch'''] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def _lowerCamelCase ( self ) -> "torch.device":
requires_backends(self , ['''torch'''] )
return self._setup_devices[0]
@property
def _lowerCamelCase ( self ) -> int:
requires_backends(self , ['''torch'''] )
return self._setup_devices[1]
@property
def _lowerCamelCase ( self ) -> Dict:
return self.n_gpu > 0
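# Hedged usage sketch: transformers exposes this dataclass as
# PyTorchBenchmarkArguments; constructing it is enough to resolve the device.
# The model name and sizes below are illustrative, not from the original.
from transformers import PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[1], sequence_lengths=[8])
print(args.device, args.n_gpu)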
| 249 | 0 |
"""simple docstring"""
import requests
def lowerCamelCase (a_ :str , a_ :str) -> None:
lowercase :str = {'''Content-Type''': '''application/json'''}
lowercase :Any = requests.post(a_ , json={'''text''': message_body} , headers=a_)
if response.status_code != 200:
lowercase :str = (
'''Request to slack returned an error '''
F"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(a_)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 356 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class __magic_name__ ( __UpperCAmelCase ):
__A : Tuple = "layoutlmv3"
def __init__( self : int , snake_case__ : Any=5_0_2_6_5 , snake_case__ : int=7_6_8 , snake_case__ : Dict=1_2 , snake_case__ : Optional[Any]=1_2 , snake_case__ : Union[str, Any]=3_0_7_2 , snake_case__ : Tuple="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : List[str]=0.1 , snake_case__ : int=5_1_2 , snake_case__ : int=2 , snake_case__ : Optional[int]=0.02 , snake_case__ : Union[str, Any]=1e-5 , snake_case__ : Optional[int]=1 , snake_case__ : Any=0 , snake_case__ : Optional[int]=2 , snake_case__ : int=1_0_2_4 , snake_case__ : str=1_2_8 , snake_case__ : Tuple=1_2_8 , snake_case__ : Optional[Any]=True , snake_case__ : Union[str, Any]=3_2 , snake_case__ : Any=1_2_8 , snake_case__ : List[Any]=6_4 , snake_case__ : List[Any]=2_5_6 , snake_case__ : Any=True , snake_case__ : Optional[Any]=True , snake_case__ : Tuple=True , snake_case__ : List[Any]=2_2_4 , snake_case__ : Optional[int]=3 , snake_case__ : Union[str, Any]=1_6 , snake_case__ : str=None , **snake_case__ : List[str] , ):
'''simple docstring'''
super().__init__(
vocab_size=snake_case__ , hidden_size=snake_case__ , num_hidden_layers=snake_case__ , num_attention_heads=snake_case__ , intermediate_size=snake_case__ , hidden_act=snake_case__ , hidden_dropout_prob=snake_case__ , attention_probs_dropout_prob=snake_case__ , max_position_embeddings=snake_case__ , type_vocab_size=snake_case__ , initializer_range=snake_case__ , layer_norm_eps=snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ , )
        lowercase :Optional[int] = max_2d_position_embeddings
        lowercase :Tuple = coordinate_size
        lowercase :Any = shape_size
        lowercase :Union[str, Any] = has_relative_attention_bias
        lowercase :Optional[Any] = rel_pos_bins
        lowercase :Tuple = max_rel_pos
        lowercase :Any = has_spatial_attention_bias
        lowercase :Any = rel_2d_pos_bins
        lowercase :str = max_rel_2d_pos
lowercase :int = text_embed
lowercase :Optional[int] = visual_embed
lowercase :str = input_size
lowercase :List[str] = num_channels
lowercase :str = patch_size
lowercase :Any = classifier_dropout
class __magic_name__ ( __UpperCAmelCase ):
__A : Tuple = version.parse("1.12" )
@property
def __snake_case ( self : Any ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __snake_case ( self : int ):
'''simple docstring'''
return 1e-5
@property
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return 1_2
def __snake_case ( self : str , snake_case__ : "ProcessorMixin" , snake_case__ : int = -1 , snake_case__ : int = -1 , snake_case__ : bool = False , snake_case__ : Optional["TensorType"] = None , snake_case__ : int = 3 , snake_case__ : int = 4_0 , snake_case__ : int = 4_0 , ):
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , snake_case__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase :Dict = compute_effective_axis_dimension(
snake_case__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase :Union[str, Any] = processor.tokenizer.num_special_tokens_to_add(snake_case__ )
lowercase :List[str] = compute_effective_axis_dimension(
snake_case__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case__ )
# Generate dummy inputs according to compute batch and sequence
lowercase :Tuple = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
lowercase :List[str] = [[[4_8, 8_4, 7_3, 1_2_8]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
lowercase :List[Any] = self._generate_dummy_images(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowercase :Dict = dict(
processor(
snake_case__ , text=snake_case__ , boxes=snake_case__ , return_tensors=snake_case__ , ) )
return inputs
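# Hedged sketch: instantiating the configuration through the public transformers
# API; the printed defaults mirror microsoft/layoutlmv3-base as defined above.
from transformers import LayoutLMv3Config

config = LayoutLMv3Config()
print(config.hidden_size, config.max_2d_position_embeddings)  # 768 1024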
| 172 | 0 |
class __A :
"""simple docstring"""
def __init__( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : str =set_counts
__UpperCamelCase : Optional[Any] =max(lowerCamelCase__ )
__UpperCamelCase : Dict =len(lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =[1] * num_sets
__UpperCamelCase : Optional[Any] =list(range(lowerCamelCase__ ) )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : List[Any] =self.get_parent(lowerCamelCase__ )
__UpperCamelCase : List[str] =self.get_parent(lowerCamelCase__ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
__UpperCamelCase : int =0
__UpperCamelCase : Any =dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
__UpperCamelCase : List[str] =self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
__UpperCamelCase : List[Any] =0
__UpperCamelCase : int =src_parent
__UpperCamelCase : int =self.set_counts[src_parent]
__UpperCamelCase : Tuple =max(self.max_set , lowerCamelCase__ )
return True
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
if self.parents[disj_set] == disj_set:
return disj_set
__UpperCamelCase : Dict =self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
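# Hedged usage sketch of the union-by-rank structure above. The readable names
# (DisjointSet, merge, get_parent) are assumptions standing in for the mangled
# originals, whose methods shadow each other and cannot be called as written.
#
#   ds = DisjointSet([1, 1, 1])                  # three singleton sets of size 1
#   ds.merge(0, 1)                               # union the first two sets
#   assert ds.get_parent(0) == ds.get_parent(1)
#   print(ds.max_set)                            # 2: the largest set now has two elements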
| 71 |
"""simple docstring"""
import numpy as np
def __lowercase ( _a ):
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
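# Quick numerical check (an addition, not in the original): the formula above is
# the logistic-form identity for tanh, (2 / (1 + e^(-2x))) - 1 == tanh(x).
import numpy as np

x = np.array([-2.0, 0.0, 2.0])
assert np.allclose((2 / (1 + np.exp(-2 * x))) - 1, np.tanh(x))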
| 264 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = DiTPipeline
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
__UpperCamelCase = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
__UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__UpperCamelCase = False
def A__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
        _lowerCAmelCase = Transformer2DModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCamelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=lowerCamelCase , )
_lowerCAmelCase = AutoencoderKL()
_lowerCAmelCase = DDIMScheduler()
_lowerCAmelCase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def A__ (self , lowerCamelCase , lowerCamelCase=0 ):
'''simple docstring'''
if str(lowerCamelCase ).startswith("""mps""" ):
_lowerCAmelCase = torch.manual_seed(lowerCamelCase )
else:
_lowerCAmelCase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
_lowerCAmelCase = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = """cpu"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
_lowerCAmelCase = self.get_dummy_inputs(lowerCamelCase )
_lowerCAmelCase = pipe(**lowerCamelCase ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase , 1e-3 )
def A__ (self ):
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=lowerCamelCase , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def A__ (self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase = pipe.get_label_ids(lowerCamelCase )
_lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-2
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase = ["""vase""", """umbrella"""]
_lowerCAmelCase = pipe.get_label_ids(lowerCamelCase )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(lowerCamelCase , generator=lowerCamelCase , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
f"""/dit/{word}_512.npy""" )
            assert np.abs((expected_image - image).max() ) < 1e-1
| 363 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE : Dict = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __UpperCAmelCase ( snake_case_ : Optional[int] ) -> List[str]:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case_ )
def __UpperCAmelCase ( snake_case_ : Union[str, Any] ) -> int:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
_lowerCAmelCase = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
        pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
| 317 | 0 |
from collections.abc import Callable
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : float = a
_lowerCAmelCase : float = b
if function(_lowerCamelCase ) == 0: # one of the a or b is a root for the function
return a
elif function(_lowerCamelCase ) == 0:
return b
elif (
function(_lowerCamelCase ) * function(_lowerCamelCase ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError("could not find root in given interval." )
else:
_lowerCAmelCase : float = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(_lowerCamelCase ) == 0:
return mid
elif function(_lowerCamelCase ) * function(_lowerCamelCase ) < 0:
_lowerCAmelCase : Tuple = mid
else:
_lowerCAmelCase : Dict = mid
_lowerCAmelCase : List[str] = start + (end - start) / 2.0
return mid
def A ( _lowerCamelCase ):
'''simple docstring'''
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
| 36 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __magic_name__ ( unittest.TestCase):
def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = '''ZinengTang/tvlt-base'''
UpperCamelCase__ : int = tempfile.mkdtemp()
def UpperCAmelCase__ ( self : int , **lowerCamelCase__ : List[str] ) -> List[Any]:
'''simple docstring'''
return TvltImageProcessor.from_pretrained(self.checkpoint , **lowerCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] , **lowerCamelCase__ : Tuple ) -> List[Any]:
'''simple docstring'''
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **lowerCamelCase__ )
def UpperCAmelCase__ ( self : str ) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : Any ) -> int:
'''simple docstring'''
UpperCamelCase__ : int = self.get_image_processor()
UpperCamelCase__ : Union[str, Any] = self.get_feature_extractor()
UpperCamelCase__ : List[str] = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ : Optional[int] = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , lowerCamelCase__ )
self.assertIsInstance(processor.image_processor , lowerCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : str = self.get_image_processor()
UpperCamelCase__ : List[Any] = self.get_feature_extractor()
UpperCamelCase__ : Dict = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
UpperCamelCase__ : Any = np.ones([12000] )
UpperCamelCase__ : Union[str, Any] = feature_extractor(lowerCamelCase__ , return_tensors='''np''' )
UpperCamelCase__ : Any = processor(audio=lowerCamelCase__ , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = self.get_image_processor()
UpperCamelCase__ : Any = self.get_feature_extractor()
UpperCamelCase__ : int = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
UpperCamelCase__ : int = np.ones([3, 224, 224] )
UpperCamelCase__ : List[str] = image_processor(lowerCamelCase__ , return_tensors='''np''' )
UpperCamelCase__ : str = processor(images=lowerCamelCase__ , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_feature_extractor()
UpperCamelCase__ : Union[str, Any] = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
UpperCamelCase__ : List[str] = np.ones([12000] )
UpperCamelCase__ : Tuple = np.ones([3, 224, 224] )
UpperCamelCase__ : Optional[Any] = processor(audio=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def UpperCAmelCase__ ( self : Dict ) -> int:
'''simple docstring'''
UpperCamelCase__ : List[str] = self.get_image_processor()
UpperCamelCase__ : str = self.get_feature_extractor()
UpperCamelCase__ : Tuple = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 146 | 0 |
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> list:
"""simple docstring"""
A__ = len(lowercase_ )
A__ = []
for i in range(len(lowercase_ ) - pat_len + 1 ):
A__ = True
for j in range(lowercase_ ):
if s[i + j] != pattern[j]:
A__ = False
break
if match_found:
position.append(lowercase_ )
return position
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC"""))
| 231 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
"""simple docstring"""
if "cls_token" in name:
A__ = name.replace('''cls_token''' , '''vit.embeddings.cls_token''' )
if "mask_token" in name:
A__ = name.replace('''mask_token''' , '''decoder.mask_token''' )
if "decoder_pos_embed" in name:
A__ = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
A__ = name.replace('''pos_embed''' , '''vit.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
A__ = name.replace('''patch_embed.proj''' , '''vit.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
A__ = name.replace('''patch_embed.norm''' , '''vit.embeddings.norm''' )
if "decoder_blocks" in name:
A__ = name.replace('''decoder_blocks''' , '''decoder.decoder_layers''' )
if "blocks" in name:
A__ = name.replace('''blocks''' , '''vit.encoder.layer''' )
if "attn.proj" in name:
A__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
A__ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
A__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
A__ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
A__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
A__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if "decoder_embed" in name:
A__ = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
if "decoder_norm" in name:
A__ = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
if "decoder_pred" in name:
A__ = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name:
A__ = name.replace('''norm.weight''' , '''vit.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name:
A__ = name.replace('''norm.bias''' , '''vit.layernorm.bias''' )
return name
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
A__ = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
A__ = key.split('''.''' )
A__ = int(key_split[1] )
if "decoder_blocks" in key:
A__ = config.decoder_hidden_size
A__ = '''decoder.decoder_layers.'''
if "weight" in key:
A__ = val[:dim, :]
A__ = val[dim : dim * 2, :]
A__ = val[-dim:, :]
elif "bias" in key:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
else:
A__ = config.hidden_size
A__ = '''vit.encoder.layer.'''
if "weight" in key:
A__ = val[:dim, :]
A__ = val[dim : dim * 2, :]
A__ = val[-dim:, :]
elif "bias" in key:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
else:
A__ = val
return orig_state_dict
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Tuple:
"""simple docstring"""
A__ = ViTMAEConfig()
if "large" in checkpoint_url:
A__ = 1_024
A__ = 4_096
A__ = 24
A__ = 16
elif "huge" in checkpoint_url:
A__ = 14
A__ = 1_280
A__ = 5_120
A__ = 32
A__ = 16
A__ = ViTMAEForPreTraining(lowercase_ )
A__ = torch.hub.load_state_dict_from_url(lowercase_ , map_location='''cpu''' )['''model''']
A__ = ViTMAEImageProcessor(size=config.image_size )
A__ = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
model.eval()
A__ = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
A__ = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
A__ = ViTMAEImageProcessor(size=config.image_size )
A__ = image_processor(images=lowercase_ , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
A__ = model(**lowercase_ )
A__ = outputs.logits
if "large" in checkpoint_url:
A__ = torch.tensor(
[[-0.73_09, -0.71_28, -1.01_69], [-1.01_61, -0.90_58, -1.18_78], [-1.04_78, -0.94_11, -1.19_11]] )
elif "huge" in checkpoint_url:
A__ = torch.tensor(
[[-1.15_99, -0.91_99, -1.22_21], [-1.19_52, -0.92_69, -1.23_07], [-1.21_43, -0.93_37, -1.22_62]] )
else:
A__ = torch.tensor(
[[-0.91_92, -0.84_81, -1.12_59], [-1.13_49, -1.00_34, -1.25_99], [-1.17_57, -1.04_29, -1.27_26]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowercase_ , atol=1E-4 )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase_ )
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
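# Hedged CLI sketch: the conversion script above is typically run like this. The
# script filename and output directory are placeholders; the checkpoint URL is
# the script's own default.
#
#   python convert_vit_mae_checkpoint.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#       --pytorch_dump_folder_path ./vit-mae-base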
| 231 | 1 |