"""
Graham's law of effusion: the rate of effusion of a gas is inversely
proportional to the square root of its molar mass, i.e.
rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1).
"""
from math import pow, sqrt


def validate(*values: float) -> bool:
    """Return True if at least one value is given and every value is positive."""
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    """Ratio of the effusion rates of two gases, rate_1 / rate_2."""
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(
    effusion_rate: float, molar_mass_1: float, molar_mass_2: float
) -> float | ValueError:
    """Effusion rate of the first gas, given the rate of the second gas."""
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def second_effusion_rate(
    effusion_rate: float, molar_mass_1: float, molar_mass_2: float
) -> float | ValueError:
    """Effusion rate of the second gas, given the rate of the first gas."""
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def first_molar_mass(
    molar_mass: float, effusion_rate_1: float, effusion_rate_2: float
) -> float | ValueError:
    """Molar mass of the first gas, given the second gas' molar mass and both rates."""
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def second_molar_mass(
    molar_mass: float, effusion_rate_1: float, effusion_rate_2: float
) -> float | ValueError:
    """Molar mass of the second gas, given the first gas' molar mass and both rates."""
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )
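
# A minimal usage sketch for the module above (gas choices and molar masses
# are illustrative values, not part of the original file):
if __name__ == "__main__":
    # Hydrogen (~2.016 g/mol) effuses faster than oxygen (~32.00 g/mol);
    # Graham's law predicts a rate ratio of sqrt(32.00 / 2.016) ~= 3.98.
    print(effusion_ratio(2.016, 32.00))
    # Recover the first gas' rate when the second gas effuses at 1.0:
    print(first_effusion_rate(1.0, 2.016, 32.00))
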
"""
Count the minimum number of perfect squares that sum to a given natural
number, using bottom-up dynamic programming.
"""
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """
    >>> minimum_squares_to_represent_a_number(25)
    1
    >>> minimum_squares_to_represent_a_number(21)
    3
    >>> minimum_squares_to_represent_a_number(-1)
    Traceback (most recent call last):
        ...
    ValueError: the value of input must not be a negative number
    """
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            # take the square j**2 and fall back on the best answer for the rest
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
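
# A couple of concrete checks for the function above (values chosen here,
# not part of the original file):
assert minimum_squares_to_represent_a_number(12) == 3  # 4 + 4 + 4
assert minimum_squares_to_represent_a_number(13) == 2  # 9 + 4
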
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    """Configuration class to store the configuration of an XLNet model."""

    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs XLNetConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
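
# A minimal usage sketch for the configuration above (hypothetical values;
# left commented because the class relies on transformers' relative imports):
# config = XLNetConfig(d_model=512, n_head=8)
# assert config.d_head == 64                    # derived as d_model // n_head
# assert config.max_position_embeddings == -1   # XLNet has no fixed length limit
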
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module_params(module) -> None:
    """Disable gradient updates for every parameter of `module`."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    """Pick the best available torch device, warning about MPS quirks."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    """Display `image` with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    """Return the current wall-clock time formatted as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
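
# A minimal runnable sketch of the helpers above (the de-obfuscated names
# `freeze_module_params`, `get_device` and `get_timestamp` are the ones
# chosen in this rewrite, not the original file's):
if __name__ == "__main__":
    layer = torch.nn.Linear(4, 4)
    freeze_module_params(layer)
    assert all(not p.requires_grad for p in layer.parameters())
    print(get_device(), get_timestamp())
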
from __future__ import annotations

import unittest

from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow


if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST

if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )


@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
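
# A minimal sketch of the cross-framework loading pattern exercised above
# (illustrative; requires both torch and tensorflow and downloads weights):
# from transformers import AutoModel, TFAutoModel
# tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)  # TF model from PT weights
# pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)    # PT model from TF weights
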
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import numpy
    import tensorflow as tf

    from transformers import (
        TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
        BertConfig,
        DPRConfig,
        TFDPRContextEncoder,
        TFDPRQuestionEncoder,
        TFDPRReader,
    )


class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict


@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
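
# A minimal sketch of using the question encoder exercised above
# (illustrative; mirrors the integration test's checkpoint):
# from transformers import DPRQuestionEncoderTokenizer, TFDPRQuestionEncoder
# tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# inputs = tokenizer("hello, is my dog cute?", return_tensors="tf")
# embedding = model(**inputs).pooler_output  # shape (1, 768)
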
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
A = False
while is_sorted is False: # Until all the indices are traversed keep looping
A = True
for i in range(0 , len(lowercase__ ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
A , A = input_list[i + 1], input_list[i]
# swapping if elements not in order
A = False
for i in range(1 , len(lowercase__ ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
A , A = input_list[i + 1], input_list[i]
# swapping if elements not in order
A = False
return input_list
if __name__ == "__main__":
print('Enter list to be sorted')
__A : str = [int(x) for x in input().split()]
# inputing elements of the list in one line
__A : int = odd_even_sort(input_list)
print('The sorted list is')
print(sorted_list)
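
# Quick illustrative checks (values chosen here, not part of the original file):
assert odd_even_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert odd_even_sort([]) == []
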
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__A : Optional[Any] = random.Random()
if is_torch_available():
import torch
def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ):
"""simple docstring"""
if rng is None:
A = global_rng
A = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __UpperCamelCase ( unittest.TestCase ):
def __init__(self : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int]=7 , __SCREAMING_SNAKE_CASE : Optional[int]=4_0_0 , __SCREAMING_SNAKE_CASE : Optional[int]=2_0_0_0 , __SCREAMING_SNAKE_CASE : Any=1 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : int=1_6_0_0_0 , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[int]=True , ):
A = parent
A = batch_size
A = min_seq_length
A = max_seq_length
A = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A = feature_size
A = padding_value
A = sampling_rate
A = return_attention_mask
A = do_normalize
def SCREAMING_SNAKE_CASE__ (self : Tuple):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def SCREAMING_SNAKE_CASE__ (self : Any , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Any=False):
def _flatten(__SCREAMING_SNAKE_CASE : Dict):
return list(itertools.chain(*__SCREAMING_SNAKE_CASE))
if equal_length:
A = floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
A = [
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
A = [np.asarray(__SCREAMING_SNAKE_CASE) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __UpperCamelCase ( _A , unittest.TestCase ):
SCREAMING_SNAKE_CASE = ASTFeatureExtractor
def SCREAMING_SNAKE_CASE__ (self : Optional[int]):
A = ASTFeatureExtractionTester(self)
def SCREAMING_SNAKE_CASE__ (self : Dict):
# Tests that all call wrap to encode_plus and batch_encode_plus
A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
A = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
A = [np.asarray(__SCREAMING_SNAKE_CASE) for speech_input in speech_inputs]
# Test not batched input
A = feat_extract(speech_inputs[0] , return_tensors="np").input_values
A = feat_extract(np_speech_inputs[0] , return_tensors="np").input_values
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3))
# Test batched
A = feat_extract(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors="np").input_values
A = feat_extract(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors="np").input_values
for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3))
# Test 2-D numpy arrays are batched.
A = [floats_list((1, x))[0] for x in (8_0_0, 8_0_0, 8_0_0)]
A = np.asarray(__SCREAMING_SNAKE_CASE)
A = feat_extract(__SCREAMING_SNAKE_CASE , return_tensors="np").input_values
A = feat_extract(__SCREAMING_SNAKE_CASE , return_tensors="np").input_values
for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3))
@require_torch
def SCREAMING_SNAKE_CASE__ (self : Optional[Any]):
import torch
A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
A = np.random.rand(1_0_0).astype(np.floataa)
A = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np")
self.assertTrue(np_processed.input_values.dtype == np.floataa)
A = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt")
self.assertTrue(pt_processed.input_values.dtype == torch.floataa)
def SCREAMING_SNAKE_CASE__ (self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict):
from datasets import load_dataset
A = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation")
# automatic decoding with librispeech
A = ds.sort("id").select(range(__SCREAMING_SNAKE_CASE))[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@require_torch
def SCREAMING_SNAKE_CASE__ (self : Dict):
# fmt: off
A = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9])
# fmt: on
A = self._load_datasamples(1)
A = ASTFeatureExtractor()
A = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors="pt").input_values
self.assertEquals(input_values.shape , (1, 1_0_2_4, 1_2_8))
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , __SCREAMING_SNAKE_CASE , atol=1E-4))
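
# A minimal sketch of the feature extractor exercised above on one second of
# silent 16 kHz audio (illustrative values, not part of the original file):
# from transformers import ASTFeatureExtractor
# extractor = ASTFeatureExtractor()
# features = extractor(np.zeros(16000), sampling_rate=16000, return_tensors="np")
# features.input_values.shape  # (1, 1024, 128): 1024 frames x 128 mel bins
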
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :jnp.ndarray
UpperCAmelCase_ :jnp.ndarray
class _SCREAMING_SNAKE_CASE ( nn.Module ):
UpperCAmelCase_ :int
UpperCAmelCase_ :Tuple[int] = (16, 32, 96, 256)
UpperCAmelCase_ :jnp.dtype = jnp.floataa
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Optional[int] = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCAmelCase_ :int = []
for i in range(len(self.block_out_channels ) - 1 ):
lowerCAmelCase_ :Union[str, Any] = self.block_out_channels[i]
lowerCAmelCase_ :Optional[int] = self.block_out_channels[i + 1]
lowerCAmelCase_ :int = nn.Conv(
__A , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__A )
lowerCAmelCase_ :List[str] = nn.Conv(
__A , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__A )
lowerCAmelCase_ :Optional[int] = blocks
lowerCAmelCase_ :int = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __A ) -> Tuple:
lowerCAmelCase_ :Dict = self.conv_in(__A )
lowerCAmelCase_ :List[str] = nn.silu(__A )
for block in self.blocks:
lowerCAmelCase_ :Any = block(__A )
lowerCAmelCase_ :Optional[int] = nn.silu(__A )
lowerCAmelCase_ :List[Any] = self.conv_out(__A )
return embedding
@flax_register_to_config
class _SCREAMING_SNAKE_CASE ( nn.Module , A__ , A__ ):
UpperCAmelCase_ :int = 32
UpperCAmelCase_ :int = 4
UpperCAmelCase_ :Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCAmelCase_ :Union[bool, Tuple[bool]] = False
UpperCAmelCase_ :Tuple[int] = (320, 640, 1280, 1280)
UpperCAmelCase_ :int = 2
UpperCAmelCase_ :Union[int, Tuple[int]] = 8
UpperCAmelCase_ :Optional[Union[int, Tuple[int]]] = None
UpperCAmelCase_ :int = 1280
UpperCAmelCase_ :float = 0.0
UpperCAmelCase_ :bool = False
UpperCAmelCase_ :jnp.dtype = jnp.floataa
UpperCAmelCase_ :bool = True
UpperCAmelCase_ :int = 0
UpperCAmelCase_ :str = "rgb"
UpperCAmelCase_ :Tuple[int] = (16, 32, 96, 256)
def __lowerCAmelCase ( self , __A ) -> FrozenDict:
# init input tensors
lowerCAmelCase_ :Optional[int] = (1, self.in_channels, self.sample_size, self.sample_size)
lowerCAmelCase_ :Dict = jnp.zeros(__A , dtype=jnp.floataa )
lowerCAmelCase_ :List[Any] = jnp.ones((1,) , dtype=jnp.intaa )
lowerCAmelCase_ :Optional[int] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowerCAmelCase_ :Any = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowerCAmelCase_ :Optional[int] = jnp.zeros(__A , dtype=jnp.floataa )
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = jax.random.split(__A )
lowerCAmelCase_ :Optional[int] = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(__A , __A , __A , __A , __A )["params"]
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Union[str, Any] = self.block_out_channels
lowerCAmelCase_ :int = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCAmelCase_ :Dict = self.num_attention_heads or self.attention_head_dim
# input
lowerCAmelCase_ :int = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCAmelCase_ :Optional[Any] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCAmelCase_ :Optional[Any] = FlaxTimestepEmbedding(__A , dtype=self.dtype )
lowerCAmelCase_ :int = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowerCAmelCase_ :List[str] = self.only_cross_attention
if isinstance(__A , __A ):
lowerCAmelCase_ :List[str] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__A , __A ):
lowerCAmelCase_ :Optional[Any] = (num_attention_heads,) * len(self.down_block_types )
# down
lowerCAmelCase_ :Dict = []
lowerCAmelCase_ :Optional[Any] = []
lowerCAmelCase_ :Dict = block_out_channels[0]
lowerCAmelCase_ :List[Any] = nn.Conv(
__A , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__A )
for i, down_block_type in enumerate(self.down_block_types ):
lowerCAmelCase_ :List[Any] = output_channel
lowerCAmelCase_ :List[str] = block_out_channels[i]
lowerCAmelCase_ :Tuple = i == len(__A ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCAmelCase_ :Tuple = FlaxCrossAttnDownBlockaD(
in_channels=__A , out_channels=__A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowerCAmelCase_ :Optional[int] = FlaxDownBlockaD(
in_channels=__A , out_channels=__A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__A )
for _ in range(self.layers_per_block ):
lowerCAmelCase_ :List[str] = nn.Conv(
__A , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__A )
if not is_final_block:
lowerCAmelCase_ :str = nn.Conv(
__A , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__A )
lowerCAmelCase_ :List[Any] = down_blocks
lowerCAmelCase_ :Optional[Any] = controlnet_down_blocks
# mid
lowerCAmelCase_ :int = block_out_channels[-1]
lowerCAmelCase_ :List[Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=__A , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowerCAmelCase_ :Dict = nn.Conv(
__A , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , __A , __A , __A , __A , __A = 1.0 , __A = True , __A = False , ) -> Union[FlaxControlNetOutput, Tuple]:
lowerCAmelCase_ :Union[str, Any] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowerCAmelCase_ :Optional[int] = jnp.flip(__A , axis=1 )
# 1. time
if not isinstance(__A , jnp.ndarray ):
lowerCAmelCase_ :List[str] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__A , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCAmelCase_ :str = timesteps.astype(dtype=jnp.floataa )
lowerCAmelCase_ :Union[str, Any] = jnp.expand_dims(__A , 0 )
lowerCAmelCase_ :List[Any] = self.time_proj(__A )
lowerCAmelCase_ :Optional[Any] = self.time_embedding(__A )
# 2. pre-process
lowerCAmelCase_ :int = jnp.transpose(__A , (0, 2, 3, 1) )
lowerCAmelCase_ :List[Any] = self.conv_in(__A )
lowerCAmelCase_ :Union[str, Any] = jnp.transpose(__A , (0, 2, 3, 1) )
lowerCAmelCase_ :List[str] = self.controlnet_cond_embedding(__A )
sample += controlnet_cond
# 3. down
lowerCAmelCase_ :Any = (sample,)
for down_block in self.down_blocks:
if isinstance(__A , __A ):
lowerCAmelCase_ , lowerCAmelCase_ :Any = down_block(__A , __A , __A , deterministic=not train )
else:
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = down_block(__A , __A , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowerCAmelCase_ :int = self.mid_block(__A , __A , __A , deterministic=not train )
# 5. contronet blocks
lowerCAmelCase_ :Dict = ()
for down_block_res_sample, controlnet_block in zip(__A , self.controlnet_down_blocks ):
lowerCAmelCase_ :Union[str, Any] = controlnet_block(__A )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowerCAmelCase_ :Optional[Any] = controlnet_down_block_res_samples
lowerCAmelCase_ :List[Any] = self.controlnet_mid_block(__A )
# 6. scaling
lowerCAmelCase_ :List[Any] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__A , mid_block_res_sample=__A )
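
# A minimal sketch of loading pretrained weights into the model above
# (illustrative; the checkpoint name is an example, and from_pretrained
# returns the model together with its parameter pytree):
# import jax.numpy as jnp
# from diffusers import FlaxControlNetModel
# controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
#     "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.float32
# )
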
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class _lowerCAmelCase :
"""simple docstring"""
__UpperCAmelCase : Tuple = XGLMConfig
__UpperCAmelCase : Optional[Any] = {}
__UpperCAmelCase : Union[str, Any] = "gelu"
def __init__( self : Optional[int], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Optional[int]=1_4, UpperCAmelCase__ : str=7, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : List[Any]=True, UpperCAmelCase__ : int=True, UpperCAmelCase__ : List[str]=9_9, UpperCAmelCase__ : Union[str, Any]=3_2, UpperCAmelCase__ : Union[str, Any]=2, UpperCAmelCase__ : Union[str, Any]=4, UpperCAmelCase__ : Tuple=3_7, UpperCAmelCase__ : List[Any]="gelu", UpperCAmelCase__ : List[str]=0.1, UpperCAmelCase__ : Optional[int]=0.1, UpperCAmelCase__ : Tuple=5_1_2, UpperCAmelCase__ : Optional[Any]=0.02, ):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = d_model
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = ffn_dim
__lowercase = activation_function
__lowercase = activation_dropout
__lowercase = attention_dropout
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = None
__lowercase = 0
__lowercase = 2
__lowercase = 1
def _lowercase ( self : Union[str, Any] ):
return XGLMConfig.from_pretrained("facebook/xglm-564M" )
def _lowercase ( self : Tuple ):
__lowercase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length], self.vocab_size ), clip_value_min=0, clip_value_max=3 )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = self.get_config()
__lowercase = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowercase ( self : List[Any] ):
return XGLMConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=UpperCAmelCase__, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=UpperCAmelCase__, )
def _lowercase ( self : Dict ):
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) ,(
__lowercase
) ,(
__lowercase
) ,(
__lowercase
) ,
) = config_and_inputs
__lowercase = {
"input_ids": input_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( lowercase ,lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__UpperCAmelCase : List[str] = (TFXGLMForCausalLM,) if is_tf_available() else ()
__UpperCAmelCase : Any = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = False
def _lowercase ( self : Optional[Any] ):
__lowercase = TFXGLMModelTester(self )
__lowercase = ConfigTester(self, config_class=UpperCAmelCase__, n_embd=3_7 )
def _lowercase ( self : Any ):
self.config_tester.run_common_tests()
@slow
def _lowercase ( self : List[str] ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFXGLMModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." )
def _lowercase ( self : int ):
super().test_resize_token_embeddings()
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[int]=True ):
__lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
__lowercase = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]], dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__lowercase = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
# fmt: on
__lowercase = model.generate(UpperCAmelCase__, do_sample=UpperCAmelCase__, num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist(), UpperCAmelCase__ )
@slow
def _lowercase ( self : List[Any] ):
__lowercase = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
__lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
tf.random.set_seed(0 )
__lowercase = tokenizer("Today is a nice day and", return_tensors="tf" )
__lowercase = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(":/CPU:0" ):
__lowercase = model.generate(UpperCAmelCase__, do_sample=UpperCAmelCase__, seed=[7, 0] )
__lowercase = tokenizer.decode(output_ids[0], skip_special_tokens=UpperCAmelCase__ )
__lowercase = (
"Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
)
self.assertEqual(UpperCAmelCase__, UpperCAmelCase__ )
@slow
def _lowercase ( self : Dict ):
__lowercase = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
__lowercase = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
__lowercase = "left"
# use different length sentences to test batching
__lowercase = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When",
"Hello, my dog is a little",
]
__lowercase = tokenizer(UpperCAmelCase__, return_tensors="tf", padding=UpperCAmelCase__ )
__lowercase = inputs["input_ids"]
__lowercase = model.generate(input_ids=UpperCAmelCase__, attention_mask=inputs["attention_mask"], max_new_tokens=1_2 )
__lowercase = tokenizer(sentences[0], return_tensors="tf" ).input_ids
__lowercase = model.generate(input_ids=UpperCAmelCase__, max_new_tokens=1_2 )
__lowercase = tokenizer(sentences[1], return_tensors="tf" ).input_ids
__lowercase = model.generate(input_ids=UpperCAmelCase__, max_new_tokens=1_2 )
__lowercase = tokenizer.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__ )
__lowercase = tokenizer.decode(output_non_padded[0], skip_special_tokens=UpperCAmelCase__ )
__lowercase = tokenizer.decode(output_padded[0], skip_special_tokens=UpperCAmelCase__ )
__lowercase = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
"a single",
"Hello, my dog is a little bit of a shy one, but he is very friendly",
]
self.assertListEqual(UpperCAmelCase__, UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__, [non_padded_sentence, padded_sentence] )
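
# A minimal sketch of greedy generation with the checkpoint used above
# (illustrative; downloads the full facebook/xglm-564M weights):
# from transformers import TFXGLMForCausalLM, XGLMTokenizer
# tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
# model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
# ids = tokenizer("The dog", return_tensors="tf").input_ids
# print(tokenizer.decode(model.generate(ids, max_new_tokens=12)[0], skip_special_tokens=True))
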
from PIL import Image


def mean_threshold(image: Image) -> Image:
    """Binarize a grayscale PIL image: pixels above the mean become 255, the rest 0."""
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open('path_to_image').convert('L'))
    image.save('output_image_path')
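
# A quick runnable check on a tiny synthetic image (illustrative, not part
# of the original file): in a constant image every pixel equals the mean,
# so nothing is strictly above it and the result is all black.
_demo = mean_threshold(Image.new("L", (4, 4), color=100))
assert list(_demo.getdata()) == [0] * 16
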
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_UpperCAmelCase = 'bart'
_UpperCAmelCase = True
@st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
if LOAD_DENSE_INDEX:
__lowerCAmelCase : Dict = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__lowerCAmelCase : str = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__lowerCAmelCase : Tuple = qar_model.eval()
else:
__lowerCAmelCase , __lowerCAmelCase : str = (None, None)
if MODEL_TYPE == "bart":
__lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__lowerCAmelCase : str = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__lowerCAmelCase : str = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__lowerCAmelCase : List[str] = sas_model.eval()
else:
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( ) -> Tuple:
if LOAD_DENSE_INDEX:
__lowerCAmelCase : List[str] = faiss.StandardGpuResources()
__lowerCAmelCase : Optional[int] = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
__lowerCAmelCase : Optional[int] = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
__lowerCAmelCase : int = faiss.IndexFlatIP(128 )
__lowerCAmelCase : Optional[int] = faiss.index_cpu_to_gpu(SCREAMING_SNAKE_CASE , 1 , SCREAMING_SNAKE_CASE )
wikiaab_gpu_index_flat.add(SCREAMING_SNAKE_CASE ) # TODO fix for larger GPU
else:
__lowerCAmelCase , __lowerCAmelCase : Dict = (None, None)
__lowerCAmelCase : List[Any] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
__lowerCAmelCase : Tuple = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
__lowerCAmelCase : int = elia["""train_eli5"""]
__lowerCAmelCase : int = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
__lowerCAmelCase : List[Any] = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(SCREAMING_SNAKE_CASE )
return (elia_train, eli5_train_q_index)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = load_indexes()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = load_models()
_UpperCAmelCase , _UpperCAmelCase = load_train_data()
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :List[str]=10 ) -> List[str]:
__lowerCAmelCase : Optional[Any] = embed_questions_for_retrieval([question] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Dict = eli5_train_q_index.search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = [elia_train[int(SCREAMING_SNAKE_CASE )] for i in I[0]]
return nn_examples
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Dict="wiki40b" , SCREAMING_SNAKE_CASE :List[Any]="dense" , SCREAMING_SNAKE_CASE :List[str]=10 ) -> str:
if source == "none":
__lowerCAmelCase , __lowerCAmelCase : Any = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = query_qa_dense_index(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = query_es_index(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index_name="""english_wiki40b_snippets_100w""" , n_results=SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__lowerCAmelCase : str = """question: {} context: {}""".format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda SCREAMING_SNAKE_CASE : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda SCREAMING_SNAKE_CASE : None),
} )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :int=64 , SCREAMING_SNAKE_CASE :int=256 , SCREAMING_SNAKE_CASE :Tuple=False , SCREAMING_SNAKE_CASE :Union[str, Any]=2 , SCREAMING_SNAKE_CASE :int=0.95 , SCREAMING_SNAKE_CASE :Any=0.8 ) -> str:
with torch.no_grad():
__lowerCAmelCase : Any = qa_sas_generate(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , num_answers=1 , num_beams=SCREAMING_SNAKE_CASE , min_len=SCREAMING_SNAKE_CASE , max_len=SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , temp=SCREAMING_SNAKE_CASE , top_p=SCREAMING_SNAKE_CASE , top_k=SCREAMING_SNAKE_CASE , max_input_length=1_024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
_UpperCAmelCase = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
_UpperCAmelCase = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_UpperCAmelCase = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
_UpperCAmelCase = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
_UpperCAmelCase = st.sidebar.checkbox('Demo options')
if demo_options:
_UpperCAmelCase = st.sidebar.selectbox(
'',
action_list,
index=3,
)
_UpperCAmelCase = action_list.index(action_st)
_UpperCAmelCase = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
_UpperCAmelCase = show_type == 'Show full text of passages'
else:
_UpperCAmelCase = 3
_UpperCAmelCase = True
retrieval_options = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
    retriever_info = '\n    ### Information retriever options\n\n    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n    '
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
    index_type = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
    wiki_source = 'wiki40b'
    index_type = 'dense'
sampled = 'beam'
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox('Generation options')
if generate_options:
    generate_info = '\n    ### Answer generation options\n\n    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n    **beam** search, or **sample** from the decoder\'s output probabilities.\n    '
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
    min_len = st.sidebar.slider(
        'Minimum generation length', min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        'Maximum generation length', min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
question_s = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_UpperCAmelCase = st.text_input('Enter your question here:', '')
else:
_UpperCAmelCase = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
_UpperCAmelCase , _UpperCAmelCase = make_support(question, source=wiki_source, method='dense', n_results=10)
_UpperCAmelCase , _UpperCAmelCase = make_support(question, source=wiki_source, method='sparse', n_results=10)
_UpperCAmelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_UpperCAmelCase = support_list[:10]
_UpperCAmelCase = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
_UpperCAmelCase , _UpperCAmelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
            wiki_url = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = '[{}]({})'.format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(' & ')
                sections = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
        answers_st = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
disclaimer = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True) | 232 | 0 |
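# Usage sketch (illustrative, not part of the app): the retrieve-then-generate loop
# above can also be exercised outside of Streamlit, assuming the indexes and the
# sas_model/sas_tokenizer pair used by make_support/answer_question are already loaded:
#
#   question = "How do people make chocolate?"
#   question_doc, support_list = make_support(question, source="wiki40b", method="dense", n_results=10)
#   answer, _ = answer_question(question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256)
#   print(answer)
#   for title, section, score, _passage in support_list:
#       print(score, title, section)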
'''Sum of an arithmetic series: S_n = n/2 * (2a + (n - 1)d).'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    '''Return the sum of the arithmetic series defined by its first term,
    common difference and number of terms.'''
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 208 |
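# Sanity check (illustrative; assumes sum_of_series from the snippet above is in scope):
# the closed form n/2 * (2a + (n - 1)d) agrees with a brute-force sum.
def _brute_force_arith_sum(first_term, common_diff, num_of_terms):
    return sum(first_term + i * common_diff for i in range(num_of_terms))

assert sum_of_series(1, 1, 10) == _brute_force_arith_sum(1, 1, 10) == 55
assert sum_of_series(5, 3, 4) == _brute_force_arith_sum(5, 3, 4) == 38  # 5 + 8 + 11 + 14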
'''Image/Text processor class for OWL-ViT.'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    """Wraps an OWL-ViT image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """Forwards to the image processor's post_process."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forwards to the image processor's post_process_object_detection."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forwards to the image processor's post_process_image_guided_detection."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forwards to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 208 | 1 |
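# Usage sketch for the processor above (the checkpoint name is illustrative; any
# OWL-ViT checkpoint shipping this processor class should behave the same way):
# per-image text query lists of different lengths are padded with " " so that
# input_ids has shape (num_images * max_num_queries, sequence_length).
#
#   from PIL import Image
#   from transformers import OwlViTProcessor
#
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   images = [Image.new("RGB", (640, 480)), Image.new("RGB", (640, 480))]
#   texts = [["a photo of a cat"], ["a photo of a dog", "a photo of a remote"]]
#   inputs = processor(text=texts, images=images, return_tensors="pt")
#   print(inputs["input_ids"].shape, inputs["pixel_values"].shape)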
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 359 |
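# Worked example of the resize/crop arithmetic in the resize() method above
# (pure arithmetic, mirroring timm's eval transform):
shortest_edge = 224
crop_pct = 224 / 256
resize_shortest_edge = int(shortest_edge / crop_pct)
assert resize_shortest_edge == 256  # a 224 center crop is taken from a 256-shorter-side resize
# At size >= 384 the image is instead warped straight to (shortest_edge, shortest_edge)
# with no crop, matching how ConvNeXT is evaluated at its fine-tuning resolution.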
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 53 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_resnet'] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_resnet'] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_resnet'] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 335 |
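# The structure above defers heavy framework imports until first attribute access.
# A stripped-down sketch of the same idea using only the standard library
# (PEP 562 module-level __getattr__), independent of transformers' _LazyModule:
#
#   import importlib
#
#   _LAZY_ATTRS = {"ResNetModel": ".modeling_resnet", "ResNetConfig": ".configuration_resnet"}
#
#   def __getattr__(name):
#       if name in _LAZY_ATTRS:
#           module = importlib.import_module(_LAZY_ATTRS[name], __name__)
#           return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")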
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=4_00,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 2_55,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height and width the YolosImageProcessor will return,
        for a single image or (with ``batched=True``) for a batch."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 13_33})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 3_97_69, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2_796, 0.3_138, 0.3_481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([3_97_69])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([8_00, 10_66])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2_796, 0.3_138, 0.3_481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([3_97_69])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 82_28_73
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([8_00, 10_66])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 335 | 1 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(num_picked: int = 20) -> str:
    '''Project Euler 493: expected number of distinct colours among
    `num_picked` balls drawn from 70 balls in 7 colours.'''
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'{result:.9f}'
if __name__ == "__main__":
print(solution(20))
| 28 |
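# Why solution() works: by linearity of expectation,
# E[#colours drawn] = NUM_COLOURS * P(a fixed colour appears)
#                   = 7 * (1 - C(70 - 10, 20) / C(70, 20)).
# Quick consistency check using the module's constants:
p_missing = math.comb(NUM_BALLS - BALLS_PER_COLOUR, 20) / math.comb(NUM_BALLS, 20)
assert f'{NUM_COLOURS * (1 - p_missing):.9f}' == solution(20)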
from math import ceil
def solution(n: int = 1001) -> int:
    '''Project Euler 28: sum of the numbers on both diagonals of an n-by-n number spiral.'''
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
        n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
| 28 | 1 |
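# Why the increment works: ring i of the spiral has side length 2*i + 1 and its
# four corners are (2i+1)^2, (2i+1)^2 - 2i, (2i+1)^2 - 4i and (2i+1)^2 - 6i,
# which sum to 4*(2i+1)**2 - 12*i, i.e. 4*odd**2 - 6*even with even = 2*i.
def _corner_sum(i):
    top_right = (2 * i + 1) ** 2
    return sum(top_right - k * 2 * i for k in range(4))

assert all(_corner_sum(i) == 4 * (2 * i + 1) ** 2 - 6 * (2 * i) for i in range(1, 100))
assert 1 + sum(_corner_sum(i) for i in range(1, 3)) == 101  # the 5x5 example from the problem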
from math import pow, sqrt
def validate(*values: float) -> bool:
    '''Return True when at least one value was given and all values are positive.'''
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result
def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    '''Graham's law: rate_1 / rate_2 = sqrt(M_2 / M_1).'''
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError('Input Error: Molar mass values must greater than 0.')
    )
def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    '''Solve Graham's law for the first effusion rate.'''
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            'Input Error: Molar mass and effusion rate values must greater than 0.')
    )
def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    '''Solve Graham's law for the second effusion rate.'''
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            'Input Error: Molar mass and effusion rate values must greater than 0.')
    )
def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    '''Solve Graham's law for the first molar mass from the rate ratio.'''
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            'Input Error: Molar mass and effusion rate values must greater than 0.')
    )
def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    '''Solve Graham's law for the second molar mass from the rate ratio.'''
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            'Input Error: Molar mass and effusion rate values must greater than 0.')
    )
| 204 |
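# Worked example of Graham's law (rate_1 / rate_2 = sqrt(M_2 / M_1)), using the
# functions above: hydrogen (M ~ 2.016 g/mol) effuses roughly four times faster
# than oxygen (M ~ 31.998 g/mol).
print(effusion_ratio(2.016, 31.998))  # ~ 3.98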
def or_gate(input_1: int, input_2: int) -> int:
    '''OR gate: output 1 when at least one input is 1, else 0.'''
    return int((input_1, input_2).count(1) != 0)
def test_or_gate() -> None:
    '''Exhaustive truth-table test for or_gate.'''
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 204 | 1 |
'''Convert a GPTSAN TensorFlow checkpoint to a PyTorch state dict.'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    '''Read a Mesh-Tensorflow GPTSAN checkpoint and write an equivalent PyTorch state dict.'''
    parameter_file = os.path.join(args.tf_model_dir, 'parameters.json')
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
    if not args.output.endswith('.pt'):
        args.output = args.output + '.pt'
    new_state = OrderedDict()
    with tf.device('/CPU:0'):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith('/adam_m') or key_name.endswith('/adam_v'):
                continue
            if key_name.startswith('pasts/'):
                if key_name.startswith('pasts/mlp'):
                    player = int(key_name[9])
                elif key_name.startswith('pasts/out'):
                    player = 8
                name = 'model.sqout.%d.weight' % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith('model/moe'):
                player = int(key_name[9:].split('/')[0])
                if key_name.endswith('/switch_gating/kernel'):
                    name = 'model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('/softmlp/kernel'):
                    name = 'model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('/wo/kernel') or key_name.endswith('/wi/kernel'):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = 'model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith('model/mlp'):
                player = int(key_name[9:].split('/')[0])
                if key_name.endswith('/p1/kernel'):
                    name = 'model.blocks.%d.feed_forward.mlp.wi.weight' % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('/p1/bias'):
                    name = 'model.blocks.%d.feed_forward.mlp.wi.bias' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('/p2/kernel'):
                    name = 'model.blocks.%d.feed_forward.mlp.wo.weight' % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('/p2/bias'):
                    name = 'model.blocks.%d.feed_forward.mlp.wo.bias' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith('model/ln'):
                player = int(key_name[8:].split('/')[0])
                if key_name.endswith('/b'):
                    name = 'model.blocks.%d.feed_forward.norm.bias' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('/g'):
                    name = 'model.blocks.%d.feed_forward.norm.weight' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith('model/att'):
                player = int(key_name[9:].split('/')[0])
                if key_name.endswith('/qkv/kernel'):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = 'model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
                    new_state[name] = torch.tensor(state_q)
                    name = 'model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
                    new_state[name] = torch.tensor(state_k)
                    name = 'model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith('/o/kernel'):
                    name = 'model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith('model/an'):
                player = int(key_name[8:].split('/')[0])
                if key_name.endswith('/b'):
                    name = 'model.blocks.%d.self_attn.norm.bias' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith('/g'):
                    name = 'model.blocks.%d.self_attn.norm.weight' % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith('model/wte')
                or key_name.startswith('model/wpe')
                or key_name.startswith('model/ete')
            ):
                nlayer = {'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
                    key_name[-3:]
                ]
                name = 'model.%s.weight' % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith('model/wte'):
                    name = 'lm_head.weight'
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith('model/wob'):
                name = 'final_logits_bias'
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = 'model.last_project.weight'
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = 'model.last_project.bias'
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
__snake_case = parser.parse_args()
convert_tf_gptsan_to_pt(args) | 219 |
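# Why the recurring vnp.transpose([1, 0]) in the converter: TF Dense kernels are
# stored as (in_features, out_features) while torch.nn.Linear.weight is
# (out_features, in_features). Minimal self-contained illustration of the mapping
# (a sketch for plain Dense <-> Linear, not GPTSAN-specific):
#
#   import numpy as np, torch
#
#   tf_kernel = np.random.rand(8, 4).astype(np.float32)          # (in, out)
#   linear = torch.nn.Linear(8, 4, bias=False)
#   with torch.no_grad():
#       linear.weight.copy_(torch.tensor(tf_kernel.transpose([1, 0]).copy()))
#   x = np.random.rand(1, 8).astype(np.float32)
#   assert np.allclose(x @ tf_kernel, linear(torch.tensor(x)).detach().numpy(), atol=1e-6)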
'''Check whether a string can be rearranged into a palindrome.'''
from collections import Counter
from timeit import timeit
def a ( __a = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2
def a ( __a = "" ) -> bool:
'''simple docstring'''
if len(__a ) == 0:
return True
UpperCamelCase__ :List[Any] = input_str.replace(''' ''' , '''''' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
UpperCamelCase__ :dict[str, int] = {}
for character in lower_case_input_str:
UpperCamelCase__ :Optional[int] = character_freq_dict.get(__a , 0 ) + 1
UpperCamelCase__ :List[str] = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def a ( __a = "" ) -> None:
'''simple docstring'''
print('''\nFor string = ''' , __a , ''':''' )
print(
'''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(__a ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
print(
'''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(__a ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
    check_str = input(
        'Enter string to determine if it can be rearranged as a palindrome or not: '
    ).strip()
benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""") | 219 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1), msg=f'''Parameter {name} of model {model_class} seems not properly initialized''', )
                    self.assertTrue(
                        torch.all(module.bias == 0), msg=f'''Parameter {name} of model {model_class} seems not properly initialized''', )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def lowercase ( ) -> Dict:
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def _UpperCamelCase ( self : List[str] ) -> Tuple:
_UpperCamelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__UpperCamelCase )
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=__UpperCamelCase , return_tensors='''pt''' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCamelCase = model(**__UpperCamelCase )
# verify the logits
_UpperCamelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
_UpperCamelCase = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
@require_torch
class UpperCAmelCase_ ( lowercase_ , unittest.TestCase):
snake_case__ = (BitBackbone,) if is_torch_available() else ()
snake_case__ = BitConfig
snake_case__ = False
def _UpperCamelCase ( self : List[str] ) -> Dict:
_UpperCamelCase = BitModelTester(self )
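# A hedged sketch of the behaviour the Bit tests above verify (illustrative only;
# the out_features below are an assumption, not pinned down by this file):
#
#   config = BitConfig(out_features=["stage2", "stage3", "stage4"])
#   backbone = BitBackbone(config)
#   feature_maps = backbone(pixel_values).feature_maps  # (batch, channels, H, W) each
#   # with output_hidden_states=True, len(hidden_states) == num_stages + 1, and the
#   # first map is image_size // 4 on each side, matching check_hidden_states_output.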
| 256 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
_UpperCAmelCase : Dict = logging.get_logger(__name__)
class lowercase :
def __init__( self , snake_case = None , snake_case = None , snake_case=None , snake_case=None ):
if not conversation_id:
            snake_case_ = uuid.uuid4()
if past_user_inputs is None:
snake_case_ = []
if generated_responses is None:
snake_case_ = []
snake_case_ = conversation_id
snake_case_ = past_user_inputs
snake_case_ = generated_responses
snake_case_ = text
def __eq__( self , snake_case ):
if not isinstance(snake_case , snake_case ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def a ( self , snake_case , snake_case = False ):
if self.new_user_input:
if overwrite:
                logger.warning(
                    F'''User input added while an unprocessed input already existed: "{self.new_user_input}" was '''
                    F'''overwritten with: "{text}".''' )
snake_case_ = text
else:
                logger.warning(
                    F'''User input added while an unprocessed input already existed: "{self.new_user_input}"; new '''
                    F'''input ignored: "{text}". Set `overwrite` to True to overwrite the unprocessed user input.''' )
else:
snake_case_ = text
def a ( self ):
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
snake_case_ = None
def a ( self , snake_case ):
self.generated_responses.append(snake_case )
def a ( self ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
snake_case_ = F'''Conversation id: {self.uuid} \n'''
for is_user, text in self.iter_texts():
snake_case_ = 'user' if is_user else 'bot'
output += F'''{name} >> {text} \n'''
return output
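# A minimal usage sketch for the Conversation container defined above (illustrative
# only, not part of the original file):
#
#   conversation = Conversation("Hi there!")
#   conversation.mark_processed()            # move the pending input into the history
#   conversation.append_response("Hello!")   # record the bot reply
#   conversation.add_user_input("How are you?")
#   print(conversation)                      # "user >> Hi there! \n bot >> Hello! ..."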
@add_end_docstrings(
lowercase_ , R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class lowercase ( lowercase_ ):
def __init__( self , *snake_case , **snake_case ):
super().__init__(*snake_case , **snake_case )
if self.tokenizer.pad_token_id is None:
snake_case_ = self.tokenizer.eos_token
def a ( self , snake_case=None , snake_case=None , snake_case=None , **snake_case ):
snake_case_ = {}
snake_case_ = {}
snake_case_ = {}
if min_length_for_response is not None:
snake_case_ = min_length_for_response
if minimum_tokens is not None:
snake_case_ = minimum_tokens
if "max_length" in generate_kwargs:
snake_case_ = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
snake_case_ = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(snake_case )
return preprocess_params, forward_params, postprocess_params
def __call__( self , snake_case , snake_case=0 , **snake_case ):
snake_case_ = super().__call__(snake_case , num_workers=snake_case , **snake_case )
if isinstance(snake_case , snake_case ) and len(snake_case ) == 1:
return outputs[0]
return outputs
def a ( self , snake_case , snake_case=32 ):
if not isinstance(snake_case , snake_case ):
            raise ValueError('ConversationalPipeline expects a Conversation as input' )
if conversation.new_user_input is None:
            raise ValueError(
                F'''Conversation with UUID {conversation.uuid} does not contain new user input to process. '''
                'Add user inputs with the conversation\'s `add_user_input` method.' )
if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
snake_case_ = self.tokenizer._build_conversation_input_ids(snake_case )
else:
            # If the tokenizer cannot handle conversations, fall back to the legacy input building
snake_case_ = self._legacy_parse_and_tokenize(snake_case )
if self.framework == "pt":
snake_case_ = torch.LongTensor([input_ids] )
elif self.framework == "tf":
snake_case_ = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def a ( self , snake_case , snake_case=10 , **snake_case ):
snake_case_ = generate_kwargs.get('max_length' , self.model.config.max_length )
snake_case_ = model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F'''Conversation input is too long ({n}), trimming it to {max_length - minimum_tokens} tokens''' )
snake_case_ = max_length - minimum_tokens
snake_case_ = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
snake_case_ = model_inputs['attention_mask'][:, -trim:]
snake_case_ = model_inputs.pop('conversation' )
snake_case_ = max_length
snake_case_ = self.model.generate(**snake_case , **snake_case )
if self.model.config.is_encoder_decoder:
snake_case_ = 1
else:
snake_case_ = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def a ( self , snake_case , snake_case=True ):
snake_case_ = model_outputs['output_ids']
snake_case_ = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case , )
snake_case_ = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(snake_case )
return conversation
def a ( self , snake_case ):
snake_case_ = self.tokenizer.eos_token_id
snake_case_ = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) )
if len(snake_case ) > self.tokenizer.model_max_length:
snake_case_ = input_ids[-self.tokenizer.model_max_length :]
return input_ids
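# A minimal end-to-end sketch for the pipeline above. The checkpoint name is an
# assumption (any conversational checkpoint would do), not something this file pins down:
#
#   from transformers import pipeline
#   chatbot = pipeline("conversational", model="facebook/blenderbot-400M-distill")
#   conversation = Conversation("What's the best way to learn Python?")
#   conversation = chatbot(conversation, minimum_tokens=10)
#   print(conversation.generated_responses[-1])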
| 285 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
lowercase_ = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
lowercase_ = {"facebook/blenderbot-3B": 1_28}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : Optional[int] = VOCAB_FILES_NAMES
A : int = PRETRAINED_VOCAB_FILES_MAP
A : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : List[str] = ["input_ids", "attention_mask"]
A : str = BlenderbotTokenizer
def __init__( self : Optional[int] , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : int=None , _lowerCAmelCase : List[Any]="replace" , _lowerCAmelCase : Union[str, Any]="<s>" , _lowerCAmelCase : int="</s>" , _lowerCAmelCase : int="</s>" , _lowerCAmelCase : Any="<s>" , _lowerCAmelCase : str="<unk>" , _lowerCAmelCase : Optional[Any]="<pad>" , _lowerCAmelCase : str="<mask>" , _lowerCAmelCase : Dict=False , _lowerCAmelCase : List[Any]=True , **_lowerCAmelCase : int , ):
super().__init__(
_lowerCAmelCase , _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , errors=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , trim_offsets=_lowerCAmelCase , **_lowerCAmelCase , )
__snake_case : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , _lowerCAmelCase ) != add_prefix_space:
__snake_case : Dict = getattr(_lowerCAmelCase , pre_tok_state.pop("""type""" ) )
__snake_case : Optional[int] = add_prefix_space
__snake_case : Dict = pre_tok_class(**_lowerCAmelCase )
__snake_case : List[str] = add_prefix_space
__snake_case : Union[str, Any] = """post_processor"""
__snake_case : Optional[int] = getattr(self.backend_tokenizer , _lowerCAmelCase , _lowerCAmelCase )
if tokenizer_component_instance:
__snake_case : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__snake_case : str = tuple(state["""sep"""] )
if "cls" in state:
__snake_case : List[str] = tuple(state["""cls"""] )
__snake_case : str = False
if state.get("""add_prefix_space""" , _lowerCAmelCase ) != add_prefix_space:
__snake_case : Optional[Any] = add_prefix_space
__snake_case : Tuple = True
if state.get("""trim_offsets""" , _lowerCAmelCase ) != trim_offsets:
__snake_case : Dict = trim_offsets
__snake_case : Dict = True
if changes_to_apply:
__snake_case : Any = getattr(_lowerCAmelCase , state.pop("""type""" ) )
__snake_case : int = component_class(**_lowerCAmelCase )
setattr(self.backend_tokenizer , _lowerCAmelCase , _lowerCAmelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def snake_case__ ( self : Union[str, Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Dict ):
__snake_case : Dict = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else value
__snake_case : Optional[Any] = value
def snake_case__ ( self : List[Any] , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : str ):
__snake_case : List[str] = kwargs.get("""is_split_into_words""" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def snake_case__ ( self : Tuple , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : List[str] ):
__snake_case : List[Any] = kwargs.get("""is_split_into_words""" , _lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_lowerCAmelCase , **_lowerCAmelCase )
def snake_case__ ( self : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
__snake_case : Tuple = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
__snake_case : str = [self.sep_token_id]
__snake_case : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case__ ( self : str , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : "Conversation" ):
__snake_case : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should contain them already.
inputs.append(_lowerCAmelCase )
__snake_case : Any = """ """.join(_lowerCAmelCase )
__snake_case : List[Any] = self.encode(_lowerCAmelCase )
if len(_lowerCAmelCase ) > self.model_max_length:
__snake_case : int = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
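# Illustrative sketch of the conversation encoding implemented above (the checkpoint
# name and the Conversation import are assumptions, not pinned by this file):
#
#   tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   conversation = Conversation("Hello!")
#   conversation.mark_processed()
#   conversation.append_response(" Hi, how can I help?")
#   conversation.add_user_input("Tell me a joke.")
#   ids = tokenizer._build_conversation_input_ids(conversation)
#   # user turns get a leading space, turns are joined with " ", and the result is
#   # left-truncated to model_max_length, as implemented above.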
| 20 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal score for the player to move on a perfect binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
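# Worked example: for scores = [3, 5, 2, 9] and height = 2, the maximizer gets
# max(min(3, 5), min(2, 9)) = max(3, 2) = 3, i.e. minimax(0, 0, True, [3, 5, 2, 9], 2) == 3.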
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))  # the root is the maximizing player
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 20 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=1_3 , UpperCamelCase__ : Any=7 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : List[Any]=9_9 , UpperCamelCase__ : Dict=3_2 , UpperCamelCase__ : Tuple=5 , UpperCamelCase__ : str=4 , UpperCamelCase__ : List[str]=3_7 , UpperCamelCase__ : int="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Union[str, Any]=5_1_2 , UpperCamelCase__ : Optional[Any]=1_6 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : str=0.0_2 , UpperCamelCase__ : str=3 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : List[str]=None , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_labels
UpperCamelCase = num_choices
UpperCamelCase = scope
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase = None
if self.use_token_type_ids:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Optional[int] ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , use_stable_embedding=UpperCamelCase__ , )
def A ( self : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = OpenLlamaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
UpperCamelCase = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , ):
"""simple docstring"""
UpperCamelCase = True
UpperCamelCase = OpenLlamaModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )
UpperCamelCase = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , ):
"""simple docstring"""
UpperCamelCase = OpenLlamaForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , ):
"""simple docstring"""
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = OpenLlamaForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# first forward pass
UpperCamelCase = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ , )
UpperCamelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )['hidden_states'][0]
UpperCamelCase = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )['hidden_states'][0]
# select random slice
UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 ) )
def A ( self : Any ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( _a , _a , _a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
_SCREAMING_SNAKE_CASE = (OpenLlamaForCausalLM,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": OpenLlamaModel,
"""text-classification""": OpenLlamaForSequenceClassification,
"""text-generation""": OpenLlamaForCausalLM,
"""zero-shot""": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = OpenLlamaModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=3_7 )
def A ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase = type
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def A ( self : int ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = input_dict['input_ids']
UpperCamelCase = input_ids.ne(1 ).to(UpperCamelCase__ )
UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase = OpenLlamaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = 'single_label_classification'
UpperCamelCase = input_dict['input_ids']
UpperCamelCase = input_ids.ne(1 ).to(UpperCamelCase__ )
UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase = OpenLlamaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = 3
UpperCamelCase = 'multi_label_classification'
UpperCamelCase = input_dict['input_ids']
UpperCamelCase = input_ids.ne(1 ).to(UpperCamelCase__ )
UpperCamelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCamelCase = OpenLlamaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def A ( self : Tuple ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def A ( self : Union[str, Any] , UpperCamelCase__ : Any ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = ids_tensor([1, 1_0] , config.vocab_size )
UpperCamelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase = OpenLlamaModel(UpperCamelCase__ )
original_model.to(UpperCamelCase__ )
original_model.eval()
UpperCamelCase = original_model(UpperCamelCase__ ).last_hidden_state
UpperCamelCase = original_model(UpperCamelCase__ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase = {'type': scaling_type, 'factor': 1_0.0}
UpperCamelCase = OpenLlamaModel(UpperCamelCase__ )
scaled_model.to(UpperCamelCase__ )
scaled_model.eval()
UpperCamelCase = scaled_model(UpperCamelCase__ ).last_hidden_state
UpperCamelCase = scaled_model(UpperCamelCase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-5 ) )
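# A hedged sketch of the rope-scaling knob the parameterized test above exercises
# (field names follow the test body, not official documentation):
#
#   config = OpenLlamaConfig()
#   config.rope_scaling = {"type": "linear", "factor": 10.0}
#   model = OpenLlamaModel(config).eval()
#   # "linear" rescales positions at every length, so even short inputs change;
#   # "dynamic" only departs from the baseline past max_position_embeddings.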
| 28 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
'''simple docstring'''
_A = ComputeEnvironment.AMAZON_SAGEMAKER
_A = True
_A = 'ml.p3.2xlarge'
_A = 'accelerate_sagemaker_execution_role'
_A = 'hf-sm'
_A = 'us-east-1'
_A = 1
_A = 'accelerate-sagemaker-1'
_A = '1.6'
_A = '4.4'
_A = 'train.py'
_A = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
_A = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self :Any ) -> Optional[int]:
        # `_convert_nargs_to_dict` should coerce each CLI value to its natural Python type.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
        assert isinstance(converted_args["model_name_or_path"] , str )
        assert isinstance(converted_args["do_train"] , bool )
        assert isinstance(converted_args["epochs"] , int )
        assert isinstance(converted_args["learning_rate"] , float )
        assert isinstance(converted_args["max_steps"] , float )
        with pytest.raises(ValueError ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args ) | 232 | 0 |
def different_signs(num_a: int, num_b: int) -> bool:
    """Return True if the two integers have opposite signs, else False.

    The sign bit of ``num_a ^ num_b`` is set exactly when the operands' sign bits differ.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return (num_a ^ num_b) < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 286 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str] , __A : int , __A : str=7 , __A : Union[str, Any]=3 , __A : Union[str, Any]=3_0 , __A : Optional[int]=4_0_0 , __A : Optional[Any]=True , __A : Optional[int]=None , __A : Union[str, Any]=True , __A : Optional[int]=[0.5, 0.5, 0.5] , __A : Any=[0.5, 0.5, 0.5] , __A : Optional[int]=True , __A : Optional[Any]=1 / 2_5_5 , __A : Union[str, Any]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ : Optional[Any] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
snake_case__ : List[Any] = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : Tuple = num_channels
snake_case__ : List[Any] = min_resolution
snake_case__ : Optional[Any] = max_resolution
snake_case__ : str = do_resize
snake_case__ : List[str] = size
snake_case__ : List[Any] = do_normalize
snake_case__ : Dict = image_mean
snake_case__ : List[Any] = image_std
snake_case__ : int = do_rescale
snake_case__ : Tuple = rescale_factor
snake_case__ : str = do_pad
def _lowercase ( self : List[str] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self : Optional[Any] , __A : Dict , __A : Union[str, Any]=False ):
if not batched:
snake_case__ : List[str] = image_inputs[0]
if isinstance(__A , Image.Image ):
snake_case__, snake_case__ : Any = image.size
else:
snake_case__, snake_case__ : List[str] = image.shape[1], image.shape[2]
if w < h:
snake_case__ : List[str] = int(self.size["shortest_edge"] * h / w )
snake_case__ : Tuple = self.size["shortest_edge"]
elif w > h:
snake_case__ : Optional[int] = self.size["shortest_edge"]
snake_case__ : Union[str, Any] = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ : Optional[Any] = self.size["shortest_edge"]
snake_case__ : List[Any] = self.size["shortest_edge"]
else:
snake_case__ : Union[str, Any] = []
for image in image_inputs:
snake_case__, snake_case__ : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            snake_case__ : Any = max(__A , key=lambda item : item[0] )[0]
            snake_case__ : Optional[int] = max(__A , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = DeformableDetrImageProcessor if is_vision_available() else None
def _lowercase ( self : Optional[int] ):
snake_case__ : str = DeformableDetrImageProcessingTester(self )
@property
def _lowercase ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Union[str, Any] ):
snake_case__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "do_rescale" ) )
self.assertTrue(hasattr(__A , "do_pad" ) )
self.assertTrue(hasattr(__A , "size" ) )
def _lowercase ( self : Tuple ):
snake_case__ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __A )
snake_case__ : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __A )
def _lowercase ( self : Any ):
pass
def _lowercase ( self : Optional[int] ):
# Initialize image_processing
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Dict = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : Optional[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : Union[str, Any] = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Any ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Union[str, Any] = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Any = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Union[str, Any] ):
# Initialize image_processing
snake_case__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : List[Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Optional[Any] = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self : Optional[int] ):
# prepare image and target
snake_case__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ : Any = json.loads(f.read() )
snake_case__ : List[str] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
snake_case__ : Optional[Any] = DeformableDetrImageProcessor()
snake_case__ : Tuple = image_processing(images=__A , annotations=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : int = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : int = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : str = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : int = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : Optional[int] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify orig_size
snake_case__ : Union[str, Any] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
@slow
def _lowercase ( self : Union[str, Any] ):
# prepare image, target and masks_path
snake_case__ : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ : Optional[int] = json.loads(f.read() )
snake_case__ : Any = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
snake_case__ : List[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ : Dict = DeformableDetrImageProcessor(format="coco_panoptic" )
snake_case__ : List[Any] = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : Optional[int] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Any = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : Tuple = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : Union[str, Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify masks
snake_case__ : Optional[Any] = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
# verify orig_size
snake_case__ : Dict = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Dict = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
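# Shape of the COCO-style payloads the slow tests above feed to the processor
# (a sketch; the annotation fields below are placeholders, not real data):
#
#   target = {"image_id": 39769, "annotations": [{"bbox": [x, y, w, h], "area": a,
#                                                 "category_id": c, "iscrowd": 0}]}
#   encoding = DeformableDetrImageProcessor()(images=image, annotations=target,
#                                             return_tensors="pt")
#   encoding["pixel_values"].shape   # (1, 3, 800, 1066) after shortest-edge resizing
#   encoding["labels"][0]["boxes"]   # normalized (cx, cy, w, h) boxes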
| 286 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : Optional[int] = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='deta'
__a ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : List[str] , __a : List[str]=None , __a : Dict=9_00 , __a : str=20_48 , __a : Tuple=6 , __a : List[str]=20_48 , __a : str=8 , __a : Union[str, Any]=6 , __a : int=10_24 , __a : List[Any]=8 , __a : Dict=0.0 , __a : Tuple=True , __a : Optional[Any]="relu" , __a : Tuple=2_56 , __a : Optional[Any]=0.1 , __a : int=0.0 , __a : List[Any]=0.0 , __a : Optional[int]=0.02 , __a : str=1.0 , __a : Dict=True , __a : Dict=False , __a : Optional[int]="sine" , __a : Any=5 , __a : List[str]=4 , __a : Optional[int]=4 , __a : List[str]=True , __a : str=3_00 , __a : int=True , __a : int=True , __a : Tuple=1 , __a : Optional[int]=5 , __a : Tuple=2 , __a : Dict=1 , __a : Optional[int]=1 , __a : Any=5 , __a : Optional[int]=2 , __a : Dict=0.1 , __a : str=0.25 , **__a : Tuple , ):
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_a = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(__a , __a ):
_a = backbone_config.pop("model_type" )
_a = CONFIG_MAPPING[backbone_model_type]
_a = config_class.from_dict(__a )
_a = backbone_config
_a = num_queries
_a = max_position_embeddings
_a = d_model
_a = encoder_ffn_dim
_a = encoder_layers
_a = encoder_attention_heads
_a = decoder_ffn_dim
_a = decoder_layers
_a = decoder_attention_heads
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = activation_function
_a = init_std
_a = init_xavier_std
_a = encoder_layerdrop
_a = auxiliary_loss
_a = position_embedding_type
# deformable attributes
_a = num_feature_levels
_a = encoder_n_points
_a = decoder_n_points
_a = two_stage
_a = two_stage_num_proposals
_a = with_box_refine
_a = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
_a = class_cost
_a = bbox_cost
_a = giou_cost
# Loss coefficients
_a = mask_loss_coefficient
_a = dice_loss_coefficient
_a = bbox_loss_coefficient
_a = giou_loss_coefficient
_a = eos_coefficient
_a = focal_alpha
super().__init__(is_encoder_decoder=__a , **__a )
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self : Dict ):
return self.d_model
def UpperCamelCase__ ( self : List[str] ):
_a = copy.deepcopy(self.__dict__ )
_a = self.backbone_config.to_dict()
_a = self.__class__.model_type
return output
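# Minimal sketch of building and serializing the config above (illustrative only):
#
#   config = DetaConfig(two_stage=True, with_box_refine=True)
#   blob = config.to_dict()  # the to_dict-style method above
#   # blob["model_type"] records the class and blob["backbone_config"] is a plain
#   # dict, so the whole object survives a JSON round trip.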
| 63 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` (an expression string in `variable`) by Newton-Raphson iteration."""
    symbol = symbols(variable)
    func = lambdify(symbol, function)
    diff_function = lambdify(symbol, diff(function, variable))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError('''Could not find root''') from None
        # Precision is checked by comparing the difference of consecutive guesses.
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(F'''The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}''')
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
F'''{newton_raphson("log(y) - 1", 2, variable="y")}''',
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
F'''{newton_raphson("exp(x) - 1", 10, precision=0.005)}''',
)
# Find root of cos(x)
print(F'''The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}''')
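# For a root of multiplicity m, the plain iteration converges only linearly; passing
# multiplicity=m restores quadratic convergence. A hedged extra example (the double
# root of (x - 1)**2 at x = 1, not part of the original demo):
#
#   newton_raphson("(x - 1)**2", 3.0, multiplicity=2)  # converges to ~1.0 in one step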
| 201 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
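# Sketch of what the _LazyModule above buys (the module path is an assumption taken
# from the file layout, not stated here):
#
#   import transformers.models.xmod as xmod
#   xmod.XmodConfig  # cheap: only the configuration module is imported
#   xmod.XmodModel   # first access triggers the deferred import of modeling_xmod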
| 15 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 15 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
_SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Any = {'vocab_file': 'vocab.txt'}
_SCREAMING_SNAKE_CASE : List[Any] = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
_SCREAMING_SNAKE_CASE : Optional[Any] = {
'YituTech/conv-bert-base': 5_12,
'YituTech/conv-bert-medium-small': 5_12,
'YituTech/conv-bert-small': 5_12,
}
_SCREAMING_SNAKE_CASE : int = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class A__ ( a__ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_INIT_CONFIGURATION
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ConvBertTokenizer
def __init__( self , __snake_case=None , __snake_case=None , __snake_case=True , __snake_case="[UNK]" , __snake_case="[SEP]" , __snake_case="[PAD]" , __snake_case="[CLS]" , __snake_case="[MASK]" , __snake_case=True , __snake_case=None , **__snake_case , ):
super().__init__(
__snake_case , tokenizer_file=__snake_case , do_lower_case=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , tokenize_chinese_chars=__snake_case , strip_accents=__snake_case , **__snake_case , )
snake_case = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , __snake_case ) != do_lower_case
or normalizer_state.get('''strip_accents''' , __snake_case ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __snake_case ) != tokenize_chinese_chars
):
snake_case = getattr(__snake_case , normalizer_state.pop('''type''' ) )
snake_case = do_lower_case
snake_case = strip_accents
snake_case = tokenize_chinese_chars
snake_case = normalizer_class(**__snake_case )
snake_case = do_lower_case
def a_ ( self , __snake_case , __snake_case=None ):
snake_case = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a_ ( self , __snake_case , __snake_case = None ):
snake_case = [self.sep_token_id]
snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a_ ( self , __snake_case , __snake_case = None ):
snake_case = self._tokenizer.model.save(__snake_case , name=__snake_case )
return tuple(__snake_case )
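# Token layout produced by the helpers above (a sketch; canonical method names are
# assumed for the mangled `a_` definitions):
#
#   single sequence: [CLS] A [SEP]          -> token_type_ids are all 0
#   sequence pair:   [CLS] A [SEP] B [SEP]  -> 0s over "[CLS] A [SEP]", 1s over "B [SEP]"
#
#   tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   tok.create_token_type_ids_from_sequences([7, 8], [9, 10])  # [0, 0, 0, 0, 1, 1, 1]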
| 127 |
"""Project Euler problem 92: count the square-digit chains below ten million that arrive at 89."""
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(100000)]
def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of `number`."""
    sum_of_digits_squared = 0
    while number:
        # Slight speedup: look up five digits at a time in the precomputed table.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
# Every chain ends in one of two cycles: the fixed point 1, or the loop through 89.
# Seeding the cache with 1 (its chain is just [1]) and 58 (a member of the 89 loop)
# lets every other chain terminate after the fewest possible iterations.
# The dictionary of the original solution was changed to an array to speed things up.
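# Worked example: 44 -> 4**2 + 4**2 = 32 -> 9 + 4 = 13 -> 1 + 9 = 10 -> 1, so 44 is on
# the chain that ends at 1; 85 -> 64 + 25 = 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37
# -> 58 -> 89 loops at 89.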
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 runs into the 89 loop
def chain(number: int) -> bool:
    """Return True if `number`'s chain reaches 1, False if it reaches 89 (memoized in CHAINS)."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    # Appending zeros leaves the digit-square sum unchanged, so cache those numbers too.
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 10000000) -> int:
    """Count how many starting numbers below `number` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 80 | 0 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 354 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()
    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels
    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)
    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]
    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]
    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )
    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))
    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
RESNET_INPUTS_DOCSTRING = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # infer the problem type from num_labels and the label dtype when it is not set
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
| 295 | 0 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = abs(__lowerCAmelCase )
lowercase_ = 0
while n > 0:
res += n % 10
n //= 10
return res
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = abs(__lowerCAmelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
return sum(int(__lowerCAmelCase ) for c in str(abs(__lowerCAmelCase ) ) )
def _SCREAMING_SNAKE_CASE () -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__lowerCAmelCase , __lowerCAmelCase ) -> None:
lowercase_ = F'''{func.__name__}({value})'''
lowercase_ = timeit(F'''__main__.{call}''' , setup="""import __main__""" )
print(F'''{call:56} = {func(__lowerCAmelCase )} -- {timing:.4f} seconds''' )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(__lowerCAmelCase , __lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 136 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"
    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
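# Illustrative usage (not part of the original file): a configuration that
# linearly stretches the RoPE context by 2x would pass the validation above:
#   config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})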
| 136 | 1 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account.
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 358 |
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    '''Calculate the waiting time of each process with shortest-job-first scheduling.'''
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes are not completed: a process whose arrival time has
    # passed and that still has remaining execution time is put into
    # ready_process, and the shortest job in ready_process (target_process)
    # is executed to completion.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    '''Calculate the turnaround time of each process: burst time plus waiting time.'''
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
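# Worked example: with burst_time=[2, 5, 3, 7] and all arrivals at 0, the jobs
# run in order P1, P3, P2, P4, giving waiting_time=[0, 5, 2, 10] (mean 4.25)
# and turn_around_time=[2, 10, 5, 17] (mean 8.5), matching the test case below.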
if __name__ == "__main__":
print("""[TEST CASE 01]""")
__snake_case = 4
__snake_case = [2, 5, 3, 7]
__snake_case = [0, 0, 0, 0]
__snake_case = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__snake_case = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 169 | 0 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f)
or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) #
or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) #
or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) #
or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) #
or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f)
or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) #
): #
return True
return False
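# e.g. _is_chinese_char(ord("中")) -> True, _is_chinese_char(ord("A")) -> False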
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
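# e.g. add_sub_symbol(["北", "京", "欢", "迎"], {"北京"}) -> ["北", "##京", "欢", "迎"]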
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords that start with ##,
        # which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster on a GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
    args = parser.parse_args()
main(args)
| 20 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple("""CoinsDistribResult""", """moves excess""")
def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0
    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1
    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data
    if count_nodes(root) != count_coins(root):
        raise ValueError("""The nodes number should be same as the number of coins""")
    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves, result_excess)
    return get_distrib(root)[0]
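# e.g. a root holding 3 coins with two empty children needs one move per child:
# distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) == 2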
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 | 1 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip | 196 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = '''bit'''
    layer_types = ['''preactivation''', '''bottleneck''']
    supported_padding = ['''SAME''', '''VALID''']
    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(F'Padding strategy {global_padding} not supported')
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [F'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names) | 196 | 1 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            emb = model.get_output_embeddings()
            self.assertTrue(emb is None or isinstance(emb, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 299 |
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
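# A minimal sketch of how this dummy behaves when torch/scipy are missing (the
# import path below is an assumption for illustration):
#   from diffusers.utils.dummy_torch_and_scipy_objects import LMSDiscreteScheduler
#   LMSDiscreteScheduler()  # raises ImportError asking you to install torch and scipy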
| 299 | 1 |
"""
Check whether a string of brackets is balanced, using a stack.
"""


def is_balanced(s: str) -> bool:
    stack: list[str] = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
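# A minimal usage sketch for is_balanced() above; the expected results in the
# comments are what the function returns for these inputs.
#   is_balanced("([]{})")  -> True
#   is_balanced("([)]")    -> False  # closing order is wrong
#   is_balanced("(((")     -> False  # unclosed brackets remain on the stack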
| 370 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    """
    Constructs a Speech2Text feature extractor that extracts mel filter-bank features
    from raw speech and optionally applies utterance-level cepstral mean and
    variance normalization (CMVN).
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        """
        Get mel filter-bank features using TorchAudio. TorchAudio expects 16-bit signed
        integers, hence the waveform should not be normalized before feature extraction.
        """
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
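# A minimal usage sketch for the extractor above (a reconstruction of
# Speech2Text's feature extractor). It assumes torchaudio is installed and
# feeds one second of synthetic 16 kHz mono audio; shapes are illustrative.
#   extractor = Speech2TextFeatureExtractor(feature_size=80, sampling_rate=16000)
#   waveform = np.random.randn(16000).astype(np.float32)
#   batch = extractor(waveform, sampling_rate=16000, return_tensors="pt")
#   print(batch["input_features"].shape)  # (1, num_frames, 80)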
| 334 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xmod'] = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 15 |
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
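# A quick sanity check for sort() above, kept as comments so the module's
# doctest run is unchanged:
#   sort([4, 2, 6, 8, 1, 7]) -> [1, 2, 4, 6, 7, 8]
#   sort([])                 -> []
#   sort([-1.1, -65.2, -42.3]) -> [-65.2, -42.3, -1.1]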
| 15 | 1 |
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split ``x`` into one sentence per line with NLTK, dropping Pegasus's <n> newline token first."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
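# A small hedged example of the helper above (requires nltk's punkt data, which
# the import-time download tries to fetch):
#   add_newline_to_end_of_each_sentence("Hello world. How are you?<n>")
#   -> "Hello world.\nHow are you?"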
| 370 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
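# Example invocation (hedged: the script file name and the checkpoint path are
# placeholders to adapt to your setup):
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/epoch=3.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-qa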
| 66 | 0 |
"""simple docstring"""
def _lowerCAmelCase ( ):
return [
a * b * (1000 - a - b)
for a in range(1 , 999 )
for b in range(lowercase_ , 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f'''{solution() = }''')
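# Worked check: the unique triplet is (200, 375, 425), since
# 200**2 + 375**2 = 40000 + 140625 = 180625 = 425**2 and the sum is 1000,
# so solution() returns 200 * 375 * 425 = 31875000.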
| 78 |
import enum
import os
from hashlib import sha256

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum don't match the expected checksum."""


def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits, recorded_splits):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: int) -> bool:
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
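# A toy sketch of verify_checksums() failing on a mismatched hash; the URL and
# checksum values are made up for illustration:
#   expected = {"https://example.com/a.txt": {"checksum": "abc", "num_bytes": 3}}
#   recorded = {"https://example.com/a.txt": {"checksum": "xyz", "num_bytes": 3}}
#   verify_checksums(expected, recorded)  # raises NonMatchingChecksumError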
| 76 | 0 |
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number, i.e. the n-th positive integer whose only
    prime factors are 2, 3 and 5 (the sequence starts 1, 2, 3, 4, 5, 6, 8, ...).
    """
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
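# Worked example: the first ten ugly numbers are
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so ugly_numbers(10) == 12; each step takes
# the smallest unused multiple of 2, 3 or 5 of an earlier ugly number.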
| 364 |
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix="") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)


@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))


class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
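# Hedged usage note: these tests target transformers' agent types and can be
# selected by class name from the test suite, e.g.
#   python -m pytest -k "AgentAudioTests or AgentImageTests or AgentTextTests" tests/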
| 73 | 0 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter to assist with logging in multiprocess.

    `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all
    processes or only the main executed one. Default is `main_process_only=True`.
    """

    @staticmethod
    def _should_log(main_process_only):
        "Check if log should be performed"
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """
        Delegates logger call after checking if we should log.
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """
    Returns a `logging.Logger` for `name` that can handle multiprocessing.
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
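# A minimal usage sketch, assuming the accelerate state has been initialized
# (e.g. by constructing an Accelerator or PartialState) before logging:
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed on the main process only")
#   logger.info("printed on every process", main_process_only=False)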
| 3 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tapas'] = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_tapas'] = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 110 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
"tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
"MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
"MvpForCausalLM",
"MvpForConditionalGeneration",
"MvpForQuestionAnswering",
"MvpForSequenceClassification",
"MvpModel",
"MvpPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 365 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
    def prepare_config_and_inputs(self):
"""simple docstring"""
lowercase_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__SCREAMING_SNAKE_CASE )
lowercase_ : str = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__SCREAMING_SNAKE_CASE )
lowercase_ : str = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__SCREAMING_SNAKE_CASE ) > 0.5
).float()
lowercase_ : str = (torch.rand((self.batch_size, self.num_labels) , device=__SCREAMING_SNAKE_CASE ) > 0.5).long()
lowercase_ : Optional[int] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
    def prepare_config_and_inputs_for_common(self):
"""simple docstring"""
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[Any] = self.prepare_config_and_inputs()
lowercase_ : Any = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
    def check_output_hidden_state(self, output, config):
"""simple docstring"""
lowercase_ : List[str] = output.encoder_hidden_states
lowercase_ : List[Any] = output.pixel_decoder_hidden_states
lowercase_ : int = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) , config.decoder_config.decoder_layers )
    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
"""simple docstring"""
with torch.no_grad():
lowercase_ : Any = MaskFormerModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : Dict = model(pixel_values=__SCREAMING_SNAKE_CASE , pixel_mask=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
"""simple docstring"""
lowercase_ : Dict = MaskFormerForInstanceSegmentation(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
def comm_check_on_output(__SCREAMING_SNAKE_CASE ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowercase_ : Tuple = model(pixel_values=__SCREAMING_SNAKE_CASE , pixel_mask=__SCREAMING_SNAKE_CASE )
lowercase_ : str = model(__SCREAMING_SNAKE_CASE )
comm_check_on_output(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = model(
pixel_values=__SCREAMING_SNAKE_CASE , pixel_mask=__SCREAMING_SNAKE_CASE , mask_labels=__SCREAMING_SNAKE_CASE , class_labels=__SCREAMING_SNAKE_CASE )
comm_check_on_output(__SCREAMING_SNAKE_CASE )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
"""simple docstring"""
lowercase_ : int = MaskFormerModelTester(self )
lowercase_ : Tuple = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE )
    def test_config(self):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_maskformer_model(self):
"""simple docstring"""
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE )
    def test_maskformer_instance_segmentation_head_model(self):
"""simple docstring"""
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__SCREAMING_SNAKE_CASE )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def _snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def _snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def _snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def _snake_case ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def _snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _snake_case ( self ):
"""simple docstring"""
pass
    def test_forward_signature(self):
"""simple docstring"""
lowercase_ , lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Dict = [*signature.parameters.keys()]
lowercase_ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
@slow
    def test_model_from_pretrained(self):
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowercase_ : Tuple = MaskFormerModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
    def test_model_with_labels(self):
"""simple docstring"""
lowercase_ : Union[str, Any] = (self.model_tester.min_size,) * 2
lowercase_ : int = {
'''pixel_values''': torch.randn((2, 3, *size) , device=__SCREAMING_SNAKE_CASE ),
'''mask_labels''': torch.randn((2, 10, *size) , device=__SCREAMING_SNAKE_CASE ),
'''class_labels''': torch.zeros(2 , 10 , device=__SCREAMING_SNAKE_CASE ).long(),
}
lowercase_ : Any = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = model(**__SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.loss is not None )
    def test_hidden_states_output(self):
"""simple docstring"""
lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE )
    def test_attention_outputs(self):
"""simple docstring"""
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = model_class(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = model(**__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.attentions is not None )
    def test_training(self):
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowercase_ : Optional[Any] = self.all_model_classes[1]
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
lowercase_ : Optional[int] = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
lowercase_ : Any = model(__SCREAMING_SNAKE_CASE , mask_labels=__SCREAMING_SNAKE_CASE , class_labels=__SCREAMING_SNAKE_CASE ).loss
loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
"""simple docstring"""
lowercase_ : Any = self.all_model_classes[1]
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
lowercase_ : Tuple = True
lowercase_ : Optional[Any] = True
lowercase_ : Tuple = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
lowercase_ : List[Any] = model(__SCREAMING_SNAKE_CASE , mask_labels=__SCREAMING_SNAKE_CASE , class_labels=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowercase_ : Any = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
lowercase_ : List[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowercase_ : Tuple = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
    def test_inference_no_head(self):
"""simple docstring"""
lowercase_ : Any = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = self.default_image_processor
lowercase_ : Dict = prepare_img()
lowercase_ : Any = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__SCREAMING_SNAKE_CASE , (1, 3, 8_00, 10_88) )
with torch.no_grad():
lowercase_ : Any = model(**__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
lowercase_ : List[Any] = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
lowercase_ : Dict = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
    def test_inference_instance_segmentation_head(self):
"""simple docstring"""
lowercase_ : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__SCREAMING_SNAKE_CASE )
.eval()
)
lowercase_ : List[str] = self.default_image_processor
lowercase_ : Union[str, Any] = prepare_img()
lowercase_ : List[str] = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__SCREAMING_SNAKE_CASE , (1, 3, 8_00, 10_88) )
with torch.no_grad():
lowercase_ : Tuple = model(**__SCREAMING_SNAKE_CASE )
# masks_queries_logits
lowercase_ : Union[str, Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowercase_ : Any = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
lowercase_ : List[str] = torch.tensor(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
# class_queries_logits
lowercase_ : Optional[int] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowercase_ : Any = torch.tensor(
[
[1.6_5_1_2E0_0, -5.2_5_7_2E0_0, -3.3_5_1_9E0_0],
[3.6_1_6_9E-0_2, -5.9_0_2_5E0_0, -2.9_3_1_3E0_0],
[1.0_7_6_6E-0_4, -7.7_6_3_0E0_0, -5.1_2_6_3E0_0],
] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
    def test_inference_instance_segmentation_head_resnet_backbone(self):
"""simple docstring"""
lowercase_ : int = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(__SCREAMING_SNAKE_CASE )
.eval()
)
lowercase_ : Tuple = self.default_image_processor
lowercase_ : Any = prepare_img()
lowercase_ : Union[str, Any] = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__SCREAMING_SNAKE_CASE , (1, 3, 8_00, 10_88) )
with torch.no_grad():
lowercase_ : Union[str, Any] = model(**__SCREAMING_SNAKE_CASE )
# masks_queries_logits
lowercase_ : Union[str, Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowercase_ : Optional[int] = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
lowercase_ : Dict = torch.tensor(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
# class_queries_logits
lowercase_ : Any = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowercase_ : Union[str, Any] = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=__SCREAMING_SNAKE_CASE ) )
    def test_with_segmentation_maps_and_loss(self):
"""simple docstring"""
lowercase_ : int = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(__SCREAMING_SNAKE_CASE )
.eval()
)
lowercase_ : int = self.default_image_processor
lowercase_ : Optional[Any] = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , )
lowercase_ : Optional[int] = inputs['''pixel_values'''].to(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = [el.to(__SCREAMING_SNAKE_CASE ) for el in inputs['''mask_labels''']]
lowercase_ : int = [el.to(__SCREAMING_SNAKE_CASE ) for el in inputs['''class_labels''']]
with torch.no_grad():
lowercase_ : List[str] = model(**__SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.loss is not None )
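# Hedged usage note: the integration tests in this file are decorated @slow, so
# they only execute when RUN_SLOW=1 is set, e.g.
#   RUN_SLOW=1 python -m pytest tests/models/maskformer -k MaskFormerModelIntegrationTest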
| 264 | 0 |
'''simple docstring'''
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
snake_case__ : Optional[Any] = _get_single_answer(_lowerCAmelCase )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
snake_case__ : Optional[Any] = example["""document"""]["""tokens"""]
snake_case__ : List[str] = []
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
return {
"context": " ".join(_lowerCAmelCase ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
snake_case__ : Any = ["""start_token""", """end_token"""]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
snake_case__ : List[str] = example["""document"""]["""tokens"""]
snake_case__ : str = answer["""start_token"""]
snake_case__ : List[str] = answer["""end_token"""]
snake_case__ : Tuple = []
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
snake_case__ : int = """ """.join(context[start_token:end_token] )
# checking above code
if assertion:
snake_case__ : List[str] = doc["""is_html"""][answer["""start_token"""] : answer["""end_token"""]]
snake_case__ : List[str] = doc["""token"""][answer["""start_token"""] : answer["""end_token"""]]
snake_case__ : Optional[int] = """ """.join([old[i] for i in range(len(_lowerCAmelCase ) ) if not is_html[i]] )
if new != old:
print("""ID:""" , example["""id"""] )
print("""New:""" , _lowerCAmelCase , end="""\n""" )
print("""Old:""" , _lowerCAmelCase , end="""\n\n""" )
return {
"context": " ".join(_lowerCAmelCase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
# overlap will be of doc_stride - q_len
snake_case__ : Optional[int] = get_context_and_ans(_lowerCAmelCase , assertion=_lowerCAmelCase )
snake_case__ : int = out["""answer"""]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
snake_case__ : Union[str, Any] = tokenizer(example["""question"""]["""text"""] , out["""context"""] ).input_ids
snake_case__ : int = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
snake_case__ : Tuple = []
snake_case__ : Dict = []
snake_case__ : Optional[Any] = input_ids[:q_len]
snake_case__ : List[str] = range(_lowerCAmelCase , len(_lowerCAmelCase ) , max_length - doc_stride )
for i in doc_start_indices:
snake_case__ : Any = i + max_length - q_len
snake_case__ : str = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["""category"""][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(_lowerCAmelCase ),
"end_token": [-100] * len(_lowerCAmelCase ),
"category": category,
},
}
snake_case__ : int = out["""context"""].split()
snake_case__ : Tuple = splitted_context[answer["""end_token"""]]
snake_case__ : int = len(
tokenizer(
""" """.join(splitted_context[: answer["""start_token"""]] ) , add_special_tokens=_lowerCAmelCase , ).input_ids )
snake_case__ : int = len(
tokenizer(""" """.join(splitted_context[: answer["""end_token"""]] ) , add_special_tokens=_lowerCAmelCase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
snake_case__ : Tuple = len(tokenizer(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
snake_case__ : int = input_ids[answer["""start_token"""] : answer["""end_token"""] + 1] # right & left are inclusive
snake_case__ : Dict = answer["""start_token"""]
snake_case__ : Optional[int] = answer["""end_token"""]
if assertion:
snake_case__ : Union[str, Any] = tokenizer.decode(_lowerCAmelCase )
if answer["span"] != new:
print("""ISSUE IN TOKENIZATION""" )
print("""OLD:""" , answer["""span"""] )
print("""NEW:""" , _lowerCAmelCase , end="""\n\n""" )
if len(_lowerCAmelCase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
snake_case__ : Tuple = input_ids[:q_len]
snake_case__ : Optional[int] = range(_lowerCAmelCase , len(_lowerCAmelCase ) , max_length - doc_stride )
snake_case__ : Dict = []
snake_case__ : List[str] = []
snake_case__ : int = []
snake_case__ : Any = [] # null, yes, no, long, short
for i in doc_start_indices:
snake_case__ : int = i + max_length - q_len
snake_case__ : List[Any] = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
snake_case__ : List[Any] = start_token - i + q_len
snake_case__ : Dict = end_token - i + q_len
answers_category.append(answer["""category"""][0] ) # ["short"] -> "short"
else:
snake_case__ : Union[str, Any] = -100
snake_case__ : Any = -100
answers_category.append("""null""" )
snake_case__ : Tuple = inputs[-1][start_token : end_token + 1]
answers_start_token.append(_lowerCAmelCase )
answers_end_token.append(_lowerCAmelCase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("""ISSUE in strided for ID:""" , example["""id"""] )
print("""New:""" , tokenizer.decode(_lowerCAmelCase ) )
print("""Old:""" , tokenizer.decode(_lowerCAmelCase ) , end="""\n\n""" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def prepare_inputs(example, tokenizer=None, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"], labels["start_token"], labels["end_token"], labels["category"]
            ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # removing 50 % samples
writer.write(
{
"""input_ids""": ids,
"""start_token""": start,
"""end_token""": end,
"""category""": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
__a = load_dataset("natural_questions")
__a = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
__a = data["train" if PROCESS_TRAIN == "true" else "validation"]
__a = {
"tokenizer": tokenizer,
"doc_stride": DOC_STRIDE,
"max_length": MAX_LENGTH,
"assertion": False,
}
__a = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
__a = data.remove_columns(["annotations", "document", "id", "question"])
print(data)
np.random.seed(SEED)
__a = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
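# Hedged usage note (the script file name below is assumed, not from the source):
#   PROCESS_TRAIN=true  python prepare_natural_questions.py   # writes nq-training.jsonl
#   PROCESS_TRAIN=false python prepare_natural_questions.py   # writes nq-validation.jsonl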
| 35 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and reduce the result to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
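# --- added check (illustrative, not in the original solution) ---
# The n=1 branch relies on 1/x + 1/y reducing via z_num/z_den =
# (x_num*y_den + x_den*y_num) / (x_den*y_den); e.g. for 1/3 + 1/6:
# z_num, z_den = 1*6 + 3*1, 3*6  ->  9/18  ->  1/2 after dividing by the gcd.
assert Fraction(1, 3) + Fraction(1, 6) == Fraction(1, 2)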
| 21 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_data2vec_audio'] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
    _import_structure['modeling_data2vec_text'] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
    _import_structure['modeling_data2vec_vision'] = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
    _import_structure['modeling_tf_data2vec_vision'] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
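# --- added usage note (illustrative) ---
# With the lazy-module pattern above, importing the package stays cheap: the
# heavy torch/TF modules load only when a name in _import_structure is first
# accessed, e.g.
#     from transformers.models.data2vec import Data2VecTextModel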
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ChineseCLIPImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs )->None:
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs )->BatchEncoding:
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs )->Any:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs )->Any:
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self )->list:
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self )->str:
        '''simple docstring'''
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
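# --- added usage sketch (illustrative; checkpoint name and image path are examples) ---
# from PIL import Image
# from transformers import ChineseCLIPProcessor
# processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# batch = processor(text=["一只猫"], images=Image.open("cat.png"), return_tensors="pt")
# # -> input_ids / attention_mask from the tokenizer plus pixel_values from the image processor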
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )
logger.info("Training/evaluation parameters %s", args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs['''input_ids'''], dtype=np.int32)
    attention_mask = np.asarray(inputs['''attention_mask'''], dtype=np.int32)
    token_type_ids = np.asarray(inputs['''token_type_ids'''], dtype=np.int32)
    # Copy inputs to the GPU
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
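# NOTE (added comment): the h_output* buffers passed to model_infer are
# page-locked (cuda.pagelocked_empty below), which is what allows the
# memcpy_htod_async / memcpy_dtoh_async calls to overlap with execution on
# the same CUDA stream; stream.synchronize() is the only blocking point per batch.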
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names
question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation='''only_second''' if pad_on_right else '''only_first''',
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding='''max_length''',
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('''overflow_to_sample_mapping''' )
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples['''input_ids'''] ) ):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i )
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples['''id'''][sample_index] )
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples['''offset_mapping'''][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] )
        ]
    return tokenized_examples
_UpperCamelCase = raw_datasets["validation"]
# Validation Feature Creation
_UpperCamelCase = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
_UpperCamelCase = default_data_collator
_UpperCamelCase = eval_dataset.remove_columns(["example_id", "offset_mapping"])
_UpperCamelCase = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples, features=features, predictions=predictions, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=stage, )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
    references = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references )
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding ) ) * engine.get_binding_dtype(binding ).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(F" Num examples = {len(eval_dataset)}")
logger.info(F" Batch size = {args.per_device_eval_batch_size}")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
    logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(F"Evaluation metrics: {eval_metric}")
| 275 |
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_UpperCAmelCase = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
_UpperCAmelCase = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
    - **zero_division** (`"warn"`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
_UpperCAmelCase = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall( datasets.Metric ):
    def _info( self ) -> datasets.MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ) -> dict:
        '''simple docstring'''
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
| 173 | 0 |
import random


def _partition(data, pivot):
    """Partition *data* into values less than, equal to, and greater than *pivot*."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index):
    """Return the element that would sit at *index* if *items* were sorted."""
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
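# --- added usage sketch (illustrative) ---
# quick_select finds an order statistic in expected O(n) without sorting:
#     quick_select([2, 4, 5, 7, 899, 54, 32], 5)  # -> 54 (6th smallest)
#     quick_select([5, 4, 3, 2], 1)               # -> 3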
| 360 |
def gnome_sort(lst):
    """Sort *lst* in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(gnome_sort(unsorted))
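# --- added worked example (illustrative) ---
# gnome_sort([3, 1, 2]):
#   3 > 1 -> swap, step back: [1, 3, 2]
#   3 > 2 -> swap, step back: [1, 2, 3]
# result: [1, 2, 3]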
| 60 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')
class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')
class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)
class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='''image classification'''
)
class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='''vision-to-text modeling''')
class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
| 340 |
from __future__ import annotations
import os
from collections.abc import Mapping
a_ = tuple[int, int]
class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        # Grow a minimum spanning tree outward from an arbitrary start vertex.
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    """Return the maximum saving from replacing the network with its minimum spanning tree."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    with open(network_file) as f:
        data: list[str] = f.read().strip().split("\n")
    adjacency_matrix: list[list[str]] = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])
    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()
    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(F"{solution() = }")
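# --- added check (illustrative) ---
# On a weighted triangle, Prim's keeps the two cheapest edges, so the
# saving equals the weight of the heaviest edge:
#     g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
#     sum(g.edges.values()) - sum(g.prims_algorithm().edges.values())  # -> 3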
| 340 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
        vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file, '''w''', encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self, **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs )
    def get_input_output_texts( self, tokenizer ):
        """simple docstring"""
        input_text = '''adapt react readapt apt'''
        output_text = '''adapt react readapt apt'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
        text = '''adapt react readapt apt'''
        bpe_tokens = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens, bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ), input_bpe_tokens )
| 6 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'
    def __init__( self, image_processor=None, tokenizer=None, **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', FutureWarning, )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor, tokenizer )
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__( self, *args, **kwargs ):
        """simple docstring"""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs )
        images = kwargs.pop('''images''', None )
        text = kwargs.pop('''text''', None )
        if len(args ) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs )
        if text is not None:
            encodings = self.tokenizer(text, **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
    def batch_decode( self, *args, **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs )
    def decode( self, *args, **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs )
    @contextmanager
    def as_target_processor( self ):
        """simple docstring"""
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your images inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json( self, tokens, is_inner_value=False, added_vocab=None ):
        """Convert a generated token sequence back into an ordered JSON-like structure."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(R'''<s_(.*?)>''', tokens, re.IGNORECASE )
            if start_token is None:
                break
            key = start_token.group(1 )
            end_token = re.search(Rf'''</s_{key}>''', tokens, re.IGNORECASE )
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, '''''' )
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token )
                end_token_escaped = re.escape(end_token )
                content = re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''', tokens, re.IGNORECASE )
                if content is not None:
                    content = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab )
                        if value:
                            if len(value ) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(R'''<sep/>''' ):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf )
                        if len(output[key] ) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab )
        if len(output ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', FutureWarning, )
        return self.image_processor
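# --- added usage sketch (illustrative; assumes an instantiated `processor`) ---
# token2json turns a generated tag sequence back into structured data:
#     processor.token2json("<s_menu><s_name>latte</s_name><s_price>4.50</s_price></s_menu>")
#     # -> {'menu': {'name': 'latte', 'price': '4.50'}}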
| 6 | 1 |
def solution() -> int:
    '''Return d1 * d10 * d100 * ... * d1000000 of Champernowne's constant.'''
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = ''.join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
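# --- added check (illustrative) ---
# The constant starts 0.123456789101112..., so index 11 of the digit string
# (the 12th digit) is the first '1' of the number 11:
#     "".join(str(n) for n in range(1, 16))[11]  # -> '1'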
| 176 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)
DatasetType = TypeVar("""DatasetType""", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    '''simple docstring'''
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.' )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    F'Dataset at position {i} has at least one split: {list(dataset )}\n'
                    F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(dataset ) )}\']' )
            raise ValueError(
                F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.' )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
def concatenate_datasets(dsets, info=None, split=None, axis=0) -> DatasetType:
    """Concatenate several map-style or iterable datasets into one along rows (axis=0) or columns (axis=1)."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
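

# Illustrative usage sketch (not part of the library source; assumes an installed
# `datasets` package exposing this public API). The toy column name "a" is an
# arbitrary choice for the demo.
if __name__ == "__main__":
    from datasets import Dataset

    d1 = Dataset.from_dict({"a": [0, 1, 2]})
    d2 = Dataset.from_dict({"a": [10, 11, 12]})
    # Alternate rows between the sources until the first one is exhausted.
    print(interleave_datasets([d1, d2])["a"])  # [0, 10, 1, 11, 2, 12]
    # Stack all rows of the second dataset after the first.
    print(concatenate_datasets([d1, d2])["a"])  # [0, 1, 2, 10, 11, 12]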
| 176 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_lowerCAmelCase = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
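

# Quick sanity sketch (illustration only, assuming a working transformers install):
# constructing the deprecated class should emit the FutureWarning declared above,
# and BeitImageProcessor is the drop-in replacement with the same constructor kwargs.
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        BeitFeatureExtractor()
        assert any(issubclass(w.category, FutureWarning) for w in caught)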
| 184 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Hill climbing with a cooling schedule: worse moves are accepted with
    probability e^(change / T), which shrinks as the temperature T decays."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (change / current_temp)  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
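
    # A worked micro-example of the Metropolis acceptance rule used above
    # (illustration only): a move that worsens the score by 2.0 is accepted with
    # probability e^(-2/T), so it is likely while the system is hot and all but
    # impossible once it has cooled.
    for temp in (10.0, 1.0, 0.1):
        print(f"T={temp}: acceptance probability ~ {(math.e) ** (-2.0 / temp):.3g}")
        # T=10 -> ~0.819, T=1 -> ~0.135, T=0.1 -> ~2e-09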
| 184 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
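
# Illustration only: the parser can also be driven programmatically. The exact flags
# accepted by each subcommand come from config_command_parser/default_command_parser,
# so check `accelerate config --help` before relying on the ones sketched here.
#
#   parser = get_config_parser()
#   args = parser.parse_args(["default"])  # e.g. write a default config file
#   args.func(args)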
| 125 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of the first ``n`` natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(f'''{solution() = }''')
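
    # Brute-force cross-check of the closed form above (illustration only): for
    # small n the O(1) formula must agree with direct summation.
    def solution_brute_force(m: int) -> int:
        numbers = range(1, m + 1)
        return sum(numbers) ** 2 - sum(i * i for i in numbers)

    assert all(solution(m) == solution_brute_force(m) for m in range(1, 50))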
| 125 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Pix2StructPreTrainedModel',
'Pix2StructForConditionalGeneration',
'Pix2StructVisionModel',
'Pix2StructTextModel',
]
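
# A sketch of what the _LazyModule indirection set up above buys (illustration only,
# not part of this module): the heavy submodule import is deferred until an attribute
# is actually touched, keeping the top-level `import transformers` cheap.
#
#   import importlib
#
#   def lazy_attr(module_name, attr_name):
#       return lambda: getattr(importlib.import_module(module_name), attr_name)
#
#   get_cfg = lazy_attr("transformers", "Pix2StructConfig")
#   cfg_cls = get_cfg()  # the real import happens only here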
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 222 |
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """An undirected graph with weighted edges, keyed by (smaller, larger) vertex."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Grow a minimum spanning tree from the smallest vertex by repeatedly adding
        the cheapest edge that crosses the cut between tree and non-tree vertices."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    """Project Euler 107: maximum saving from replacing the network by its MST."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data: list[str] = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
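

def demo() -> int:
    """Tiny self-contained check of Prim's algorithm above (illustration only): a
    4-vertex graph whose MST weighs 1 + 2 + 3 = 6, so the saving is 16 - 6 = 10."""
    graph = Graph({0, 1, 2, 3}, {(0, 1): 1, (1, 2): 2, (2, 3): 3, (0, 3): 4, (0, 2): 6})
    subgraph = graph.prims_algorithm()
    return sum(graph.edges.values()) - sum(subgraph.edges.values())  # 10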
if __name__ == "__main__":
print(F'''{solution() = }''') | 222 | 1 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Load a newline-delimited vocabulary file into a token -> index ordered dict."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Greedy longest-match-first split of ``token`` into in-vocabulary sub-tokens."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>",
                 pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>",
                 padding_side="left", **kwargs):
        requires_backends(self, ["jieba"])
        super().__init__(bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token,
                         pad_token=pad_token, unk_token=unk_token, line_token=line_token,
                         space_token=space_token, padding_side=padding_side, **kwargs)
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # remap the special space/newline tokens onto literal characters
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Segment with jieba, then split each segment with the wordpiece tokenizer."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # restore the special space/newline tokens before writing
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
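

# Standalone illustration of the greedy longest-match-first loop in
# WordpieceTokenizer.tokenize above (the toy vocab is invented for the demo and is
# not CPM-Ant's real vocabulary): "unhappy" itself is not in the vocab, so the
# longest in-vocab prefix "un" gets peeled off first, then "happy".
if __name__ == "__main__":
    toy = WordpieceTokenizer(vocab={"un": 0, "happy": 1, "h": 2}, unk_token="<unk>")
    print(toy.tokenize("unhappy"))  # ['un', 'happy']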
| 73 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
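

def tied_head_demo() -> None:
    """Illustration (not part of the conversion script) of the weight sharing that
    make_linear_from_emb performs above: the LM head reuses the embedding storage,
    so logits are dot products against the input embedding rows and no copy is made."""
    emb = nn.Embedding(10, 4)
    head = make_linear_from_emb(emb)
    assert head.weight.data_ptr() == emb.weight.data_ptr()  # same underlying tensor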
| 257 | 0 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    # Grab the final path component, e.g. "beagle_32.jpg" -> "beagle"
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
# Initialize accelerator
if args.with_tracking:
_lowerCAmelCase =Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
else:
_lowerCAmelCase =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase =config["""lr"""]
_lowerCAmelCase =int(config["""num_epochs"""] )
_lowerCAmelCase =int(config["""seed"""] )
_lowerCAmelCase =int(config["""batch_size"""] )
_lowerCAmelCase =config["""image_size"""]
if not isinstance(__UpperCamelCase , (list, tuple) ):
_lowerCAmelCase =(image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , """isdigit""" ):
if args.checkpointing_steps == "epoch":
_lowerCAmelCase =args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
_lowerCAmelCase =int(args.checkpointing_steps )
else:
raise ValueError(
F'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
_lowerCAmelCase =None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
_lowerCAmelCase =os.path.split(__UpperCamelCase )[-1].split(""".""" )[0]
accelerator.init_trackers(__UpperCamelCase , __UpperCamelCase )
# Grab all the image filenames
_lowerCAmelCase =[os.path.join(args.data_dir , __UpperCamelCase ) for fname in os.listdir(args.data_dir ) if fname.endswith(""".jpg""" )]
# Build the label correspondences
_lowerCAmelCase =[extract_label(__UpperCamelCase ) for fname in file_names]
_lowerCAmelCase =list(set(__UpperCamelCase ) )
id_to_label.sort()
_lowerCAmelCase ={lbl: i for i, lbl in enumerate(__UpperCamelCase )}
# Set the seed before splitting the data.
np.random.seed(__UpperCamelCase )
torch.manual_seed(__UpperCamelCase )
torch.cuda.manual_seed_all(__UpperCamelCase )
# Split our filenames between train and validation
_lowerCAmelCase =np.random.permutation(len(__UpperCamelCase ) )
_lowerCAmelCase =int(0.8 * len(__UpperCamelCase ) )
_lowerCAmelCase =random_perm[:cut]
_lowerCAmelCase =random_perm[cut:]
# For training we use a simple RandomResizedCrop
_lowerCAmelCase =Compose([RandomResizedCrop(__UpperCamelCase , scale=(0.5, 1.0) ), ToTensor()] )
_lowerCAmelCase =PetsDataset(
[file_names[i] for i in train_split] , image_transform=__UpperCamelCase , label_to_id=__UpperCamelCase )
# For evaluation, we use a deterministic Resize
_lowerCAmelCase =Compose([Resize(__UpperCamelCase ), ToTensor()] )
_lowerCAmelCase =PetsDataset([file_names[i] for i in eval_split] , image_transform=__UpperCamelCase , label_to_id=__UpperCamelCase )
# Instantiate dataloaders.
_lowerCAmelCase =DataLoader(__UpperCamelCase , shuffle=__UpperCamelCase , batch_size=__UpperCamelCase , num_workers=4 )
_lowerCAmelCase =DataLoader(__UpperCamelCase , shuffle=__UpperCamelCase , batch_size=__UpperCamelCase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCAmelCase =create_model("""resnet50d""" , pretrained=__UpperCamelCase , num_classes=len(__UpperCamelCase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowerCAmelCase =model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
_lowerCAmelCase =False
for param in model.get_classifier().parameters():
_lowerCAmelCase =True
# We normalize the batches of images to be a bit faster.
_lowerCAmelCase =torch.tensor(model.default_cfg["""mean"""] )[None, :, None, None].to(accelerator.device )
_lowerCAmelCase =torch.tensor(model.default_cfg["""std"""] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
_lowerCAmelCase =torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
_lowerCAmelCase =OneCycleLR(optimizer=__UpperCamelCase , max_lr=__UpperCamelCase , epochs=__UpperCamelCase , steps_per_epoch=len(__UpperCamelCase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# We need to keep track of how many total steps we have iterated over
_lowerCAmelCase =0
# We also need to keep track of the starting epoch so files are named properly
_lowerCAmelCase =0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
_lowerCAmelCase =os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
_lowerCAmelCase =[f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
_lowerCAmelCase =dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
_lowerCAmelCase =os.path.splitext(__UpperCamelCase )[0]
if "epoch" in training_difference:
_lowerCAmelCase =int(training_difference.replace("""epoch_""" , """""" ) ) + 1
_lowerCAmelCase =None
else:
_lowerCAmelCase =int(training_difference.replace("""step_""" , """""" ) )
_lowerCAmelCase =resume_step // len(__UpperCamelCase )
resume_step -= starting_epoch * len(__UpperCamelCase )
# Now we train the model
for epoch in range(__UpperCamelCase , __UpperCamelCase ):
model.train()
if args.with_tracking:
_lowerCAmelCase =0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
_lowerCAmelCase =accelerator.skip_first_batches(__UpperCamelCase , __UpperCamelCase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
_lowerCAmelCase =train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
_lowerCAmelCase ={k: v.to(accelerator.device ) for k, v in batch.items()}
_lowerCAmelCase =(batch["""image"""] - mean) / std
_lowerCAmelCase =model(__UpperCamelCase )
_lowerCAmelCase =torch.nn.functional.cross_entropy(__UpperCamelCase , batch["""label"""] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__UpperCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
model.eval()
_lowerCAmelCase =0
_lowerCAmelCase =0
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
_lowerCAmelCase ={k: v.to(accelerator.device ) for k, v in batch.items()}
_lowerCAmelCase =(batch["""image"""] - mean) / std
with torch.no_grad():
_lowerCAmelCase =model(__UpperCamelCase )
_lowerCAmelCase =outputs.argmax(dim=-1 )
_lowerCAmelCase , _lowerCAmelCase =accelerator.gather_for_metrics((predictions, batch["""label"""]) )
_lowerCAmelCase =predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
_lowerCAmelCase =accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}: {100 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
"""accuracy""": 100 * eval_metric,
"""train_loss""": total_loss.item() / len(__UpperCamelCase ),
"""epoch""": epoch,
} , step=__UpperCamelCase , )
if checkpointing_steps == "epoch":
_lowerCAmelCase =F'''epoch_{epoch}'''
if args.output_dir is not None:
_lowerCAmelCase =os.path.join(args.output_dir , __UpperCamelCase )
accelerator.save_state(__UpperCamelCase )
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
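
# Side note on the per-channel normalization used in training_function above
# (illustration only): indexing with [None, :, None, None] reshapes the 3-element
# mean/std vectors to (1, 3, 1, 1) so they broadcast over a (batch, channel,
# height, width) image tensor:
#
#   x = torch.rand(8, 3, 224, 224)
#   mean = torch.tensor([0.5, 0.5, 0.5])[None, :, None, None]  # shape (1, 3, 1, 1)
#   std = torch.tensor([0.25, 0.25, 0.25])[None, :, None, None]
#   normalized = (x - mean) / std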
| 354 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
], # cummulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
lowerCamelCase__: List[Any] =tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
lowerCamelCase__: str =tf.convert_to_tensor(
[8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non filtered values as noted above
lowerCamelCase__: Any =tf_top_k_top_p_filtering(UpperCAmelCase_ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4)
lowerCamelCase__: List[str] =output[output != -float("inf")]
lowerCamelCase__: List[Any] =tf.cast(
tf.where(tf.not_equal(UpperCAmelCase_ , tf.constant(-float("inf") , dtype=tf.floataa))) , dtype=tf.intaa , )
tf.debugging.assert_near(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1E-1_2)
tf.debugging.assert_equal(UpperCAmelCase_ , UpperCAmelCase_)
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
lowercase_ = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Optional[int] =TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
lowerCamelCase__: Any =2
lowerCamelCase__: List[Any] =2
class _SCREAMING_SNAKE_CASE ( tf.Module ):
'''simple docstring'''
def __init__(self : Any , UpperCAmelCase_ : Dict) ->Union[str, Any]:
'''simple docstring'''
super(UpperCAmelCase_ , self).__init__()
lowerCamelCase__: Union[str, Any] =model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids"),
tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask"),
) , jit_compile=UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any) ->int:
'''simple docstring'''
lowerCamelCase__: Any =self.model.generate(
input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , max_new_tokens=UpperCAmelCase_ , return_dict_in_generate=UpperCAmelCase_ , )
return {"sequences": outputs["sequences"]}
lowerCamelCase__: Dict =[[2, 0], [102, 103]]
lowerCamelCase__: List[Any] =[[1, 0], [1, 1]]
lowerCamelCase__: Union[str, Any] =DummyModel(model=UpperCAmelCase_)
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCAmelCase_ , UpperCAmelCase_ , signatures={"serving_default": dummy_model.serving})
lowerCamelCase__: Tuple =tf.saved_model.load(UpperCAmelCase_).signatures["serving_default"]
for batch_size in range(1 , len(UpperCAmelCase_) + 1):
lowerCamelCase__: Optional[Any] ={
"input_ids": tf.constant(dummy_input_ids[:batch_size]),
"attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
}
lowerCamelCase__: int =serving_func(**UpperCAmelCase_)["sequences"]
lowerCamelCase__: int =test_model.generate(**UpperCAmelCase_ , max_new_tokens=UpperCAmelCase_)
tf.debugging.assert_equal(UpperCAmelCase_ , UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Tuple =TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
lowerCamelCase__: Tuple =1
lowerCamelCase__: int =2
class _SCREAMING_SNAKE_CASE ( tf.Module ):
'''simple docstring'''
def __init__(self : Optional[Any] , UpperCAmelCase_ : int) ->Tuple:
'''simple docstring'''
super(UpperCAmelCase_ , self).__init__()
lowerCamelCase__: str =model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="input_ids"),
tf.TensorSpec((batch_size, None) , tf.intaa , name="attention_mask"),
) , jit_compile=UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Dict =self.model.generate(
input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , max_new_tokens=UpperCAmelCase_ , return_dict_in_generate=UpperCAmelCase_ , )
return {"sequences": outputs["sequences"]}
lowerCamelCase__: Dict =[[2], [102, 103]]
lowerCamelCase__: Tuple =[[1], [1, 1]]
lowerCamelCase__: List[str] =DummyModel(model=UpperCAmelCase_)
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCAmelCase_ , UpperCAmelCase_ , signatures={"serving_default": dummy_model.serving})
lowerCamelCase__: Dict =tf.saved_model.load(UpperCAmelCase_).signatures["serving_default"]
for input_row in range(len(UpperCAmelCase_)):
lowerCamelCase__: Optional[Any] ={
"input_ids": tf.constant([dummy_input_ids[input_row]]),
"attention_mask": tf.constant([dummy_attention_masks[input_row]]),
}
lowerCamelCase__: List[Any] =serving_func(**UpperCAmelCase_)["sequences"]
lowerCamelCase__: Optional[Any] =test_model.generate(**UpperCAmelCase_ , max_new_tokens=UpperCAmelCase_)
tf.debugging.assert_equal(UpperCAmelCase_ , UpperCAmelCase_)
@slow
@require_tensorflow_text
def SCREAMING_SNAKE_CASE_ (self : Any) ->Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=UpperCAmelCase_)
class _SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self : Optional[int]) ->Optional[int]:
'''simple docstring'''
super().__init__()
lowerCamelCase__: Dict =text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(UpperCAmelCase_ , "spiece.model") , "rb").read())
lowerCamelCase__: Dict =TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Optional[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Any =self.tokenizer.tokenize(UpperCAmelCase_)
lowerCamelCase__ , lowerCamelCase__: Dict =text.pad_model_inputs(
UpperCAmelCase_ , max_seq_length=64 , pad_value=self.model.config.pad_token_id)
lowerCamelCase__: Dict =self.model.generate(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_)
return self.tokenizer.detokenize(UpperCAmelCase_)
lowerCamelCase__: int =CompleteSentenceTransformer()
lowerCamelCase__: Dict =tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs")
lowerCamelCase__: Optional[Any] =complete_model(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =tf.keras.Model(UpperCAmelCase_ , UpperCAmelCase_)
keras_model.save(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Dict) ->int:
'''simple docstring'''
lowerCamelCase__: str ={
"do_sample": True,
"num_beams": 1,
"top_p": 0.7,
"top_k": 10,
"temperature": 0.7,
}
lowerCamelCase__: Any =14
lowerCamelCase__: Tuple =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
lowerCamelCase__: int ="Hello, my dog is cute and"
lowerCamelCase__: Optional[int] =tokenizer(UpperCAmelCase_ , return_tensors="tf")
lowerCamelCase__: Tuple =TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
lowerCamelCase__: Union[str, Any] =638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(":/CPU:0"):
tf.random.set_seed(0)
lowerCamelCase__: int =model.generate(**UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
self.assertTrue(expectation == len(generated_tokens[0]))
lowerCamelCase__: List[str] =[638, 198]
with tf.device(":/CPU:0"):
tf.random.set_seed(0)
lowerCamelCase__: Dict =model.generate(**UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
self.assertTrue(expectation == len(generated_tokens[0]))
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Tuple =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
lowerCamelCase__: Optional[int] ="Hugging Face is a technology company based in New York and Paris."
lowerCamelCase__: Any =bart_tokenizer(UpperCAmelCase_ , return_tensors="tf").input_ids
lowerCamelCase__: Optional[int] =TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
lowerCamelCase__: Optional[int] =bart_model.generate(UpperCAmelCase_).numpy()
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : int) ->Any:
'''simple docstring'''
return super().call(UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
lowerCamelCase__: Union[str, Any] =bart_model.generate(UpperCAmelCase_ , foo="bar").numpy()
self.assertTrue(np.array_equal(UpperCAmelCase_ , UpperCAmelCase_))
class _SCREAMING_SNAKE_CASE ( bart_model.model.encoder.__class__ ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any]) ->Optional[int]:
'''simple docstring'''
return super().call(UpperCAmelCase_ , **UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =FakeEncoder(bart_model.config , bart_model.model.shared)
lowerCamelCase__: int =fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
lowerCamelCase__: Any =bart_model.generate(UpperCAmelCase_).numpy()
with self.assertRaises(UpperCAmelCase_):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(UpperCAmelCase_ , foo="bar")
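

# Minimal NumPy sketch of the top-p filtering exercised by the first test above
# (illustration only; the real tf_top_k_top_p_filtering also handles batching,
# top-k and min_tokens_to_keep): keep the smallest set of tokens whose cumulative
# probability reaches top_p and mask the rest with -inf.
if __name__ == "__main__":
    logits = np.array([2.0, 1.0, 0.5, -1.0])
    probs = np.exp(logits) / np.exp(logits).sum()
    order = np.argsort(-probs)
    cutoff = int(np.searchsorted(np.cumsum(probs[order]), 0.9)) + 1  # top_p = 0.9
    filtered = np.full_like(logits, -np.inf)
    filtered[order[:cutoff]] = logits[order[:cutoff]]
    print(filtered)  # [ 2.   1.   0.5 -inf]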
| 10 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 136 | 0 |
'''simple docstring'''
def text_justification(word: str, max_width: int) -> list:
    """Greedily pack words into lines of exactly ``max_width`` characters,
    distributing extra spaces left-first (see the example at the end of this file)."""
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = overall_spaces_count % spaces_to_insert_between_words
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
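
# Usage illustration for text_justification above (toy input chosen for the demo):
# every returned line is exactly max_width characters, extra spaces going left-first.
#
#   >>> text_justification("This is an example of text justification.", 16)
#   ['This    is    an', 'example  of text', 'justification.  ']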
| 264 |
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    """Mobius function of ``n``: 0 if n is divisible by a square, otherwise (-1)**k for k prime factors."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
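
# Quick illustration (assumes the maths.* helpers imported above are available):
# the first few values of the Mobius function match the classic sequence.
#
#   >>> [mobius(n) for n in range(1, 11)]
#   [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]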
| 264 | 1 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) | 6 |
# flake8: noqa
# Lint as: python3
__all__ = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental | 6 | 1 |
def is_sum_subset(arr: list, required_sum: int) -> bool:
    """Return True if some subset of ``arr`` sums exactly to ``required_sum``,
    using the classic O(len(arr) * required_sum) boolean DP table."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
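
# Usage illustration for the subset-sum DP above (toy values chosen for the demo):
#
#   >>> is_sum_subset([2, 4, 6, 8], 5)
#   False
#   >>> is_sum_subset([2, 4, 6, 8], 14)
#   True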
| 353 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->int:
'''simple docstring'''
A_ : List[str] = tempfile.mkdtemp()
A_ : Optional[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
A_ : Union[str, Any] = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
        A_ : Tuple = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )->Any:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )->Tuple:
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )->Dict:
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self )->List[str]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _snake_case ( self )->int:
'''simple docstring'''
A_ : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ : Optional[Any] = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 65 | 0 |
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
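# check_circuit_or_path return codes: 1 means every vertex has even degree
# (an Euler cycle exists), 2 means exactly two odd-degree vertices (an Euler
# path exists, starting from one of them), 3 means the graph has neither.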
| 257 |
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
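# Example: perfect(28) is True because 1 + 2 + 4 + 7 + 14 == 28, while
# perfect(27) is False (its proper divisors 1 + 3 + 9 only sum to 13).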
| 257 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
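    # Note: this is a 1-D (audio) UNet; sample_size=512 is the waveform length
    # that the shape assertions in the tests below check against.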
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()
@skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)
@skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)

        audio = output.audios
        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)

        audio = output.audios
        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
| 83 |
'''simple docstring'''
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
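# Example: PrefixSum([1, 2, 3]).get_sum(0, 2) == 6, and
# PrefixSum([1, 2, 3]).contains_sum(5) is True (the slice [2, 3] sums to 5).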
| 83 | 1 |
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Apply |Z| = sqrt(R**2 + X**2) to solve for whichever quantity is given as 0."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
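# Example: electrical_impedance(3, 4, 0) returns {"impedance": 5.0},
# since |Z| = sqrt(3**2 + 4**2) = 5.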
| 95 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
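# This suite mirrors the processor tests above: it writes a tiny CLIP BPE
# vocab/merges pair plus a ViT image-processor config to a temp dir, then
# exercises CLIPSegProcessor save/load, tokenization, and visual prompts.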
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
# fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
        image_processor_map = {
"do_resize": True,
"size": 2_0,
"do_center_crop": True,
"crop_size": 1_8,
"do_normalize": True,
"image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Creates a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 95 | 1 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the chain of 1 (ends at 1)
CHAINS[57] = False  # the chain of 58 (ends at 89)


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    # chains that arrive at 89 are stored as False
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }")
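# Example chain: 44 -> 32 -> 13 -> 10 -> 1, so chain(44) is True;
# 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89 loops, so chain(85) is False.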
| 170 |
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
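    # _generate_tables below streams each JSON file in `chunksize`-byte windows
    # and retries pyarrow's reader with a doubled block size whenever a record
    # straddles a chunk boundary.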
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
| 170 | 1 |
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
if "patch_embed.proj" in name:
UpperCamelCase__: Optional[Any] = name.replace("patch_embed.proj" ,"embeddings.patch_embeddings.projection")
if "patch_embed.norm" in name:
UpperCamelCase__: Union[str, Any] = name.replace("patch_embed.norm" ,"embeddings.norm")
if "layers" in name:
UpperCamelCase__: str = "encoder." + name
if "attn.proj" in name:
UpperCamelCase__: Union[str, Any] = name.replace("attn.proj" ,"attention.output.dense")
if "attn" in name:
UpperCamelCase__: Any = name.replace("attn" ,"attention.self")
if "norm1" in name:
UpperCamelCase__: Any = name.replace("norm1" ,"layernorm_before")
if "norm2" in name:
UpperCamelCase__: List[str] = name.replace("norm2" ,"layernorm_after")
if "mlp.fc1" in name:
UpperCamelCase__: Optional[Any] = name.replace("mlp.fc1" ,"intermediate.dense")
if "mlp.fc2" in name:
UpperCamelCase__: Optional[int] = name.replace("mlp.fc2" ,"output.dense")
if name == "norm.weight":
UpperCamelCase__: int = "layernorm.weight"
if name == "norm.bias":
UpperCamelCase__: Optional[int] = "layernorm.bias"
if "head" in name:
UpperCamelCase__: Optional[Any] = name.replace("head" ,"classifier")
else:
UpperCamelCase__: int = "swin." + name
return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
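# timm stores Q, K and V as one fused `qkv` tensor; the split above carves it
# into the separate query/key/value weights that the HF Swin checkpoint expects.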
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 149 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
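# Optional backends are probed below; each available backend appends its
# symbols to `_import_structure` so `_LazyModule` can defer the real imports.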
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 149 | 1 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
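# ReturnType controls postprocessing: raw token ids (TENSORS), only the newly
# generated text (NEW_TEXT), or prompt + continuation (FULL_TEXT).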
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
'''simple docstring'''
    XL_PREFIX = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)
    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)
return records | 370 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
    """Factory used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)
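# The factory above is wired into argparse via `set_defaults(func=...)` so the
# CLI dispatcher can build a TrainCommand from the parsed Namespace.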
class TrainCommand(BaseTransformersCLICommand):
'''simple docstring'''
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )

        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()
    def run_torch(self):
        raise NotImplementedError
    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
self.pipeline.save_pretrained(self.output ) | 187 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
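# A typical invocation (script name and model choices are illustrative):
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2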
| 1 |
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue backed by a doubly linked ring of fixed capacity."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()

        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
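# Example:
#   queue = CircularQueueLinkedList(2)
#   queue.enqueue("a"); queue.enqueue("b")
#   queue.dequeue()  # -> "a"; a third enqueue before this would raise "Full Queue"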
| 142 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
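
# Added usage sketch (not part of the original file). Shows the intended entry point:
# the auto classes above pick the right architecture from a checkpoint's config.
# The checkpoint id below is illustrative and requires network access to resolve.
def _example_load_flax_auto_model(checkpoint: str = "bert-base-uncased"):
    model = FlaxAutoModel.from_pretrained(checkpoint)
    return model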
| 206 |
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway
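
# Added usage sketch (not part of the original file): run a short Nagel-Schreckenberg
# style simulation on a 30-cell ring and print each time step.
def _demo_traffic() -> None:
    highway = construct_highway(30, frequency=4, initial_speed=2, max_speed=5)
    for state in simulate(highway, number_of_update=5, probability=0.1, max_speed=5):
        print(state)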
| 206 | 1 |
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
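
# Added usage sketch (not part of the original file): what these helpers produce.
def _demo_naming() -> None:
    assert camelcase_to_snakecase("SquadV2") == "squad_v2"
    assert snakecase_to_camelcase("squad_v2") == "SquadV2"
    print(filenames_for_dataset_split("/data", "squad", "train", "arrow", shard_lengths=[100, 100]))
    # -> ['/data/squad-train-00000-of-00002.arrow', '/data/squad-train-00001-of-00002.arrow']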
| 57 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase ="audio-spectrogram-transformer"
def __init__( self , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=16 , UpperCamelCase_=True , UpperCamelCase_=10 , UpperCamelCase_=10 , UpperCamelCase_=10_24 , UpperCamelCase_=1_28 , **UpperCamelCase_ , ) -> Optional[int]:
super().__init__(**UpperCamelCase_ )
__lowercase : Optional[Any] = hidden_size
__lowercase : List[str] = num_hidden_layers
__lowercase : List[str] = num_attention_heads
__lowercase : Dict = intermediate_size
__lowercase : List[str] = hidden_act
__lowercase : Union[str, Any] = hidden_dropout_prob
__lowercase : Optional[Any] = attention_probs_dropout_prob
__lowercase : Dict = initializer_range
__lowercase : Optional[int] = layer_norm_eps
__lowercase : Optional[int] = patch_size
__lowercase : List[str] = qkv_bias
__lowercase : Union[str, Any] = frequency_stride
__lowercase : List[Any] = time_stride
__lowercase : Tuple = max_length
__lowercase : int = num_mel_bins
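
# Added usage sketch (not part of the original file): instantiating the config with
# defaults matching the MIT AudioSet checkpoint referenced in the archive map above.
def _example_ast_config() -> "ASTConfig":
    return ASTConfig(num_mel_bins=128, max_length=1024)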
| 249 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : Optional[int] = 'luke'
def __init__( self , __UpperCAmelCase=50267 , __UpperCAmelCase=500000 , __UpperCAmelCase=768 , __UpperCAmelCase=256 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-1_2 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , **__UpperCAmelCase , ) -> Dict:
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
_a = vocab_size
_a = entity_vocab_size
_a = hidden_size
_a = entity_emb_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = initializer_range
_a = layer_norm_eps
_a = use_entity_aware_attention
_a = classifier_dropout | 153 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> Dict:
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
_a = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
sd_pipe.set_scheduler('''sample_euler''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe([prompt] , generator=__UpperCAmelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_a = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> List[str]:
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_a = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
sd_pipe.set_scheduler('''sample_euler''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe([prompt] , generator=__UpperCAmelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_a = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def _UpperCAmelCase ( self ) -> str:
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_a = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe(
[prompt] , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=__UpperCAmelCase , )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_a = np.array(
[0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 153 | 1 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50_000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )

        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
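
# Added usage sketch (not part of the original file): the tester is normally driven by
# the mixin's setUp, but it can also be exercised directly to inspect dummy inputs.
def _example_roformer_dummy_inputs():
    tester = FlaxRoFormerModelTester(parent=None)
    config, inputs_dict = tester.prepare_config_and_inputs_for_common()
    return config, sorted(inputs_dict)  # ('attention_mask', 'input_ids', 'token_type_ids')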
| 207 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Convert a pytorch BertModel into a TF 1.x checkpoint on disk."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
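
# Added usage sketch (not part of the original file): invoking the converter
# programmatically via main()'s raw_args hook; the paths below are placeholders.
def _example_convert() -> None:
    main(
        [
            "--model_name", "bert-base-uncased",
            "--pytorch_model_path", "/path/to/pytorch_model.bin",
            "--tf_cache_dir", "/path/to/tf_out",
        ]
    )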
| 207 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."

    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 280 |
# Lint as: python3
"""Utilities for file names."""
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
| 280 | 1 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False


logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"


def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )


def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping patches, shaped [1, rows, columns, patch_height * patch_width * channels]."""
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)


def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> Image.Image:
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image


def render_header(image: np.ndarray, header: str, **kwargs):
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image


class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the resized image is covered by an integer grid of patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result

    def normalize(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean and std across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
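
# Added usage sketch (not part of the original file): running the processor on a
# random image; requires torch at runtime and uses only names defined above.
def _example_pix2struct_preprocess():
    import numpy as _np

    processor = Pix2StructImageProcessor(max_patches=512)
    image = _np.random.randint(0, 255, (224, 224, 3), dtype=_np.uint8)
    encoded = processor.preprocess(image, return_tensors="np")
    return encoded["flattened_patches"].shape  # (1, 512, 2 + 16 * 16 * 3)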
| 240 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
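
# Added usage sketch (not part of the original file): packing a toy corpus in memory.
# The tokenizer checkpoint is illustrative; any HF tokenizer works here.
def _example_pack() -> None:
    tok = AutoTokenizer.from_pretrained("sshleifer/tiny-mbart")
    src = ["short one.", "short two.", "short three."]
    tgt = ["a.", "b.", "c."]
    packed_src, packed_tgt = pack_examples(tok, src, tgt, max_tokens=1024)
    print(len(packed_src), packed_src[0])  # all three fit into a single packed example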
| 170 | 0 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
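
# Added usage sketch (not part of the original file): shape_list mixes static and
# dynamic dimensions, and flatten mirrors torch.flatten for TF tensors.
def _example_tf_utils() -> None:
    x = tf.zeros((2, 3, 4))
    print(shape_list(x))        # [2, 3, 4]
    print(flatten(x, 1).shape)  # (2, 12)
    print(stable_softmax(tf.zeros((2, 3)), axis=-1).numpy())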
| 60 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
UpperCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase_ :Optional[Any] = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
UpperCamelCase_ :str = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
UpperCamelCase_ :int = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
UpperCamelCase_ :int = False
UpperCamelCase_ :Dict = False
UpperCamelCase_ :List[str] = False
UpperCamelCase_ :int = False
def UpperCAmelCase_ ( self )-> List[Any]:
UpperCamelCase_ = GPTNeoXJapaneseModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def UpperCAmelCase_ ( self )-> Tuple:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase_ ( self )-> List[Any]:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase_ ( self )-> Any:
# This regression test was failing with PyTorch < 1.3
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase_ = None
self.model_tester.create_and_check_model_as_decoder(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase_ ( self )-> List[str]:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_lowercase )
@slow
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ = "abeja/gpt-neox-japanese-2.7b"
UpperCamelCase_ = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
UpperCamelCase_ = [
"データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
"100年後に必要とされる会社は、「人」が中心の会社です。",
"フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
"国境の長いトンネルを抜けると、そこは雪国だった。",
"美味しい日本食といえば、やっぱりお寿司ですよね。",
]
UpperCamelCase_ = GPTNeoXJapaneseTokenizer.from_pretrained(_lowercase )
UpperCamelCase_ = GPTNeoXJapaneseForCausalLM.from_pretrained(_lowercase )
UpperCamelCase_ = []
for prompt in prompts:
UpperCamelCase_ = tokenizer(_lowercase , return_tensors="pt" ).input_ids
UpperCamelCase_ = model.generate(_lowercase , max_length=50 )
UpperCamelCase_ = tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase )
predicted_outputs += generated_string
self.assertListEqual(_lowercase , _lowercase )
| 60 | 1 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=30522, type=int)
    args = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
    counts = [0] * args.vocab_size
for k, v in counter.items():
        counts[k] = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
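    # Hypothetical invocation (paths mirror the argparse defaults above):
    #   python token_counts.py \
    #       --data_file data/dump.bert-base-uncased.pickle \
    #       --token_counts_dump data/token_counts.bert-base-uncased.pickle \
    #       --vocab_size 30522
    # The dumped list maps token id -> occurrence count and is later used to
    # smooth MLM masking probabilities (rarer tokens get masked more often).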
| 39 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=3 , UpperCAmelCase=32 , UpperCAmelCase=3 , UpperCAmelCase=10 , UpperCAmelCase=[10, 20, 30, 40] , UpperCAmelCase=[1, 1, 2, 1] , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase="relu" , UpperCAmelCase=3 , UpperCAmelCase=None , ):
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embeddings_size
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
_UpperCAmelCase = len(UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = TFResNetModel(config=UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFResNetForImageClassification(UpperCAmelCase )
_UpperCAmelCase = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class TFResNetModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
UpperCamelCase__ = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TFResNetModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self ):
"""simple docstring"""
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCAmelCase )
_UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
_UpperCAmelCase = model_class(UpperCAmelCase )
_UpperCAmelCase = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCAmelCase = layer_type
_UpperCAmelCase = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFResNetModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __A ( )-> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCamelCase ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=UpperCAmelCase , return_tensors='tf' )
# forward pass
_UpperCAmelCase = model(**UpperCAmelCase )
# verify the logits
_UpperCAmelCase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
_UpperCAmelCase = tf.constant([-11.10_69, -9.78_77, -8.37_77] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , UpperCAmelCase , atol=1e-4 ) )
| 39 | 1 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
__UpperCamelCase = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig( datasets.BuilderConfig ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[datasets.Features] = None
SCREAMING_SNAKE_CASE_ : str = "utf-8"
SCREAMING_SNAKE_CASE_ : Optional[str] = None
SCREAMING_SNAKE_CASE_ : Optional[str] = None
SCREAMING_SNAKE_CASE_ : bool = True # deprecated
SCREAMING_SNAKE_CASE_ : Optional[int] = None # deprecated
SCREAMING_SNAKE_CASE_ : int = 1_0 << 2_0 # 10MB
SCREAMING_SNAKE_CASE_ : Optional[bool] = None
class Json( datasets.ArrowBasedBuilder ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = JsonConfig
def __A ( self ) -> Any:
if self.config.block_size is not None:
logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' )
SCREAMING_SNAKE_CASE = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' )
if self.config.newlines_in_values is not None:
raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' )
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , lowerCAmelCase__ ) -> List[Any]:
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
SCREAMING_SNAKE_CASE = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
SCREAMING_SNAKE_CASE = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE = [files]
SCREAMING_SNAKE_CASE = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
SCREAMING_SNAKE_CASE = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE = [files]
SCREAMING_SNAKE_CASE = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={'files': files} ) )
return splits
def __A ( self , lowerCAmelCase__ ) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
SCREAMING_SNAKE_CASE = self.config.features.arrow_schema.field(lowerCAmelCase__ ).type
SCREAMING_SNAKE_CASE = pa_table.append_column(lowerCAmelCase__ , pa.array([None] * len(lowerCAmelCase__ ) , type=lowerCAmelCase__ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
SCREAMING_SNAKE_CASE = table_cast(lowerCAmelCase__ , self.config.features.arrow_schema )
return pa_table
def __A ( self , lowerCAmelCase__ ) -> Any:
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(lowerCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
SCREAMING_SNAKE_CASE = json.load(lowerCAmelCase__ )
# We keep only the field we are interested in
SCREAMING_SNAKE_CASE = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(lowerCAmelCase__ , (list, tuple) ):
SCREAMING_SNAKE_CASE = set().union(*[row.keys() for row in dataset] )
SCREAMING_SNAKE_CASE = {col: [row.get(lowerCAmelCase__ ) for row in dataset] for col in keys}
else:
SCREAMING_SNAKE_CASE = dataset
SCREAMING_SNAKE_CASE = pa.Table.from_pydict(lowerCAmelCase__ )
yield file_idx, self._cast_table(lowerCAmelCase__ )
# If the file has one json object per line
else:
with open(lowerCAmelCase__ , 'rb' ) as f:
                batch_idx = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
                block_size = max(self.config.chunksize // 32 , 16 << 10 )
SCREAMING_SNAKE_CASE = (
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
)
while True:
                    batch = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(lowerCAmelCase__ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
SCREAMING_SNAKE_CASE = batch.decode(self.config.encoding , errors=lowerCAmelCase__ ).encode('utf-8' )
try:
while True:
try:
SCREAMING_SNAKE_CASE = paj.read_json(
io.BytesIO(lowerCAmelCase__ ) , read_options=paj.ReadOptions(block_size=lowerCAmelCase__ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(lowerCAmelCase__ , pa.ArrowInvalid )
and "straddling" not in str(lowerCAmelCase__ )
or block_size > len(lowerCAmelCase__ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F'Batch of {len(lowerCAmelCase__ )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
lowerCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
SCREAMING_SNAKE_CASE = json.load(lowerCAmelCase__ )
except json.JSONDecodeError:
logger.error(F'Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): # list is the only sequence type supported in JSON
try:
SCREAMING_SNAKE_CASE = set().union(*[row.keys() for row in dataset] )
SCREAMING_SNAKE_CASE = {col: [row.get(lowerCAmelCase__ ) for row in dataset] for col in keys}
SCREAMING_SNAKE_CASE = pa.Table.from_pydict(lowerCAmelCase__ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F'Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}' )
raise ValueError(F'Not able to read records in the JSON file at {file}.' ) from None
yield file_idx, self._cast_table(lowerCAmelCase__ )
break
else:
logger.error(F'Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}' )
raise ValueError(
F'Not able to read records in the JSON file at {file}. '
F'You should probably indicate the field of the JSON file containing your records. '
F'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
F'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ )
batch_idx += 1
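# Illustrative usage of this builder (the path and field name are
# hypothetical):
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files="data.json", field="records")
# Plain JSON-lines files work without `field`; a single JSON object whose
# `field` holds a list of dicts (or a dict of lists) is handled by the
# fallback logic above.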
| 38 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mvp_fast'''] = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mvp'''] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
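# How the pattern above behaves at runtime (a general note on _LazyModule, not
# a claim specific to this file): the heavy submodules are only imported when
# one of their attributes is first accessed, e.g.
#   import transformers.models.mvp as mvp   # cheap: nothing heavy imported yet
#   cfg = mvp.MvpConfig()                   # attribute access triggers the import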
| 38 | 1 |
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__UpperCAmelCase = logging.get_logger(__name__)
def normalize_box(box , width , height ) -> list:
    '''Project pixel coordinates onto the 0-1000 grid used by LayoutLM-style models.'''
    return [
        int(1_0_0_0 * (box[0] / width) ),
        int(1_0_0_0 * (box[1] / height) ),
        int(1_0_0_0 * (box[2] / width) ),
        int(1_0_0_0 * (box[3] / height) ),
    ]
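# Worked example for the helper above: a box of [100, 200, 300, 400] in a
# 1000x800-pixel image lands on the 0-1000 grid as
#   normalize_box([100, 200, 300, 400], width=1000, height=800)
#   -> [100, 250, 300, 500]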
def apply_tesseract(image , lang=None , tesseract_config=None ) -> tuple:
    '''Applies Tesseract OCR on a document image and returns recognized words plus normalized bounding boxes.'''
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image )
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type="dict" , config=tesseract_config )
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
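# Usage sketch (needs pytesseract plus a Tesseract binary; the file name is
# illustrative):
#   from PIL import Image
#   import numpy as np
#   words, boxes = apply_tesseract(np.array(Image.open("scan.png")), lang=None)
#   # words[i] is the i-th recognized token, boxes[i] its [x0, y0, x1, y1]
#   # on the 0-1000 grid produced by normalize_box above.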
class LayoutLMv2ImageProcessor( BaseImageProcessor ):
UpperCAmelCase_ :Optional[Any] = ['pixel_values']
def __init__( self , __A = True , __A = None , __A = PILImageResampling.BILINEAR , __A = True , __A = None , __A = "" , **__A , ) -> None:
super().__init__(**__UpperCAmelCase )
lowerCAmelCase_ :Optional[int] = size if size is not None else {"""height""": 224, """width""": 224}
lowerCAmelCase_ :Any = get_size_dict(__UpperCAmelCase )
lowerCAmelCase_ :Dict = do_resize
lowerCAmelCase_ :Optional[int] = size
lowerCAmelCase_ :str = resample
lowerCAmelCase_ :Tuple = apply_ocr
lowerCAmelCase_ :List[Any] = ocr_lang
lowerCAmelCase_ :int = tesseract_config
def __lowerCAmelCase ( self , __A , __A , __A = PILImageResampling.BILINEAR , __A = None , **__A , ) -> np.ndarray:
lowerCAmelCase_ :int = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
lowerCAmelCase_ :Optional[int] = (size["""height"""], size["""width"""])
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCAmelCase ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ) -> PIL.Image.Image:
lowerCAmelCase_ :Any = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ :List[str] = size if size is not None else self.size
lowerCAmelCase_ :List[str] = get_size_dict(__UpperCAmelCase )
lowerCAmelCase_ :int = resample if resample is not None else self.resample
lowerCAmelCase_ :int = apply_ocr if apply_ocr is not None else self.apply_ocr
lowerCAmelCase_ :Tuple = ocr_lang if ocr_lang is not None else self.ocr_lang
lowerCAmelCase_ :List[Any] = tesseract_config if tesseract_config is not None else self.tesseract_config
lowerCAmelCase_ :Optional[int] = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
lowerCAmelCase_ :Tuple = [to_numpy_array(__UpperCAmelCase ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
lowerCAmelCase_ :Dict = []
lowerCAmelCase_ :Optional[int] = []
for image in images:
lowerCAmelCase_ , lowerCAmelCase_ :Any = apply_tesseract(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
words_batch.append(__UpperCAmelCase )
boxes_batch.append(__UpperCAmelCase )
if do_resize:
lowerCAmelCase_ :Optional[Any] = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
lowerCAmelCase_ :Optional[int] = [flip_channel_order(__UpperCAmelCase ) for image in images]
lowerCAmelCase_ :Tuple = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
lowerCAmelCase_ :Tuple = BatchFeature(data={"""pixel_values""": images} , tensor_type=__UpperCAmelCase )
if apply_ocr:
lowerCAmelCase_ :Any = words_batch
lowerCAmelCase_ :Optional[Any] = boxes_batch
return data
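# Minimal usage sketch for the processor above (pytesseract is required when
# apply_ocr=True; "document.png" is illustrative):
#   from PIL import Image
#   processor = LayoutLMv2ImageProcessor(apply_ocr=True)
#   encoding = processor(images=Image.open("document.png"), return_tensors="np")
#   encoding["pixel_values"], encoding["words"], encoding["boxes"]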
| 84 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
A_ : Dict = logging.get_logger(__name__)
A_ : Any = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class DebertaV2Config(PretrainedConfig ):
    model_type = 'deberta-v2'
def __init__( self : Any , __UpperCAmelCase : Optional[Any]=1_2_8_1_0_0 , __UpperCAmelCase : Optional[Any]=1_5_3_6 , __UpperCAmelCase : List[Any]=2_4 , __UpperCAmelCase : str=2_4 , __UpperCAmelCase : Optional[int]=6_1_4_4 , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : Optional[Any]=5_1_2 , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : int=0.02 , __UpperCAmelCase : Any=1e-7 , __UpperCAmelCase : Tuple=False , __UpperCAmelCase : Any=-1 , __UpperCAmelCase : Union[str, Any]=0 , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Optional[Any]=0 , __UpperCAmelCase : Union[str, Any]="gelu" , **__UpperCAmelCase : Any , ) -> Union[str, Any]:
super().__init__(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = relative_attention
SCREAMING_SNAKE_CASE__ = max_relative_positions
SCREAMING_SNAKE_CASE__ = pad_token_id
SCREAMING_SNAKE_CASE__ = position_biased_input
# Backwards compatibility
if type(__UpperCAmelCase ) == str:
SCREAMING_SNAKE_CASE__ = [x.strip() for x in pos_att_type.lower().split("""|""" )]
SCREAMING_SNAKE_CASE__ = pos_att_type
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = kwargs.get("""pooler_hidden_size""" , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = pooler_dropout
SCREAMING_SNAKE_CASE__ = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig ):
@property
def SCREAMING_SNAKE_CASE ( self : Any ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE__ = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
return 1_2
def SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __UpperCAmelCase : int = -1 , __UpperCAmelCase : int = -1 , __UpperCAmelCase : int = -1 , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional["TensorType"] = None , __UpperCAmelCase : int = 3 , __UpperCAmelCase : int = 4_0 , __UpperCAmelCase : int = 4_0 , __UpperCAmelCase : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ = super().generate_dummy_inputs(preprocessor=__UpperCAmelCase , framework=__UpperCAmelCase )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
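# Hedged usage sketch for the two classes above:
#   from transformers import AutoTokenizer
#   onnx_config = DebertaV2OnnxConfig(DebertaV2Config())
#   tok = AutoTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
#   dummy = onnx_config.generate_dummy_inputs(tok, framework=None)
#   # token_type_ids is dropped when type_vocab_size == 0, per the override above.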
| 165 | 0 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
def rename_key(key ) -> str:
    regex = R'\w+[.]\d+'
    pats = re.findall(regex , key )
    for pat in pats:
        key = key.replace(pat , '_'.join(pat.split('.' ) ) )
    return key


def rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict ) -> Any:
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if (
        any('norm' in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model , init_key=4_2 ) -> dict:
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )
    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split('.' ) )
        # Correctly rename weight parameters
        flax_key , flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
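# Usage sketch (assumes a Flax model exposing `init_weights`, as diffusers
# Flax models do, and a matching PyTorch state dict):
#   flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)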
| 332 |
'''simple docstring'''
from pathlib import Path
import fire
def minify(src_dir , dest_dir , n ) -> None:
    """Write the first n lines of each file in src_dir to dest_dir."""
    src_dir = Path(src_dir )
    dest_dir = Path(dest_dir )
    dest_dir.mkdir(exist_ok=True )
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines() )][:n]
        dest_path = dest_dir.joinpath(path.name )
        print(dest_path )
        dest_path.open('w' ).write('\n'.join(new ) )
if __name__ == "__main__":
fire.Fire(minify)
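    # Hypothetical invocation via python-fire (arguments are illustrative):
    #   python minify.py --src_dir raw_data/ --dest_dir mini_data/ --n 100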
| 332 | 1 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase__ = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig( folder_based_builder.FolderBasedBuilderConfig ):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder( folder_based_builder.FolderBasedBuilder ):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = '''audio'''
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column='''audio''' , label_column='''label''' )
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
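# Illustrative loading of an audio folder dataset (the directory layout is
# hypothetical; class labels are inferred from subfolder names):
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="my_audio/")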
| 339 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key , default=False ) -> bool:
    '''simple docstring'''
    try:
        _value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(_value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"If set, {key} must be yes or no." )
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def A ( _UpperCAmelCase : List[str] ) -> List[str]:
'''simple docstring'''
return unittest.skip('Test was skipped' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Dict ) -> str:
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any ) -> str:
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Dict ) -> Dict:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[Any] ) -> List[Any]:
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : str ) -> str:
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Tuple ) -> int:
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Tuple ) -> Any:
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[int] ) -> str:
'''simple docstring'''
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any=None , _UpperCAmelCase : List[Any]=None ) -> Dict:
'''simple docstring'''
if test_case is None:
return partial(_UpperCAmelCase , version=_UpperCAmelCase )
return unittest.skipUnless(is_torch_version('>=' , _UpperCAmelCase ) , F"test requires torch version >= {version}" )(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] ) -> int:
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(_UpperCAmelCase )
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def A ( _UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(_UpperCAmelCase )
class TempDirTestCase( unittest.TestCase ):
    clear_on_setup = True

    @classmethod
    def setUpClass( cls ) -> None:
        """simple docstring"""
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass( cls ) -> None:
        """simple docstring"""
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp( self ) -> None:
        """simple docstring"""
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('**/*'):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase( unittest.TestCase ):
    def tearDown( self ) -> None:
        """simple docstring"""
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase( unittest.TestCase ):
    def add_mocks( self , mocks ) -> None:
        """simple docstring"""
        self.mocks = mocks if isinstance(mocks , (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor ) -> bool:
    '''simple docstring'''
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class _RunOutput:
    def __init__( self , returncode , stdout , stderr ) -> None:
        """simple docstring"""
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream , callback ) -> None:
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
    '''simple docstring'''
    if echo:
        print('\nRunning: ' , ' '.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line , sink , pipe , label="" ):
        line = line.decode('utf-8' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label='stdout:' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label='stderr:' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async(cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ) -> _RunOutput:
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ' '.join(cmd )
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr )
        raise RuntimeError(
            F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            F"The combined stderr from workers follows:\n{stderr}" )
    return result
class SubprocessCallException( Exception ):
    pass
def run_command(command , return_stdout=False ) -> Any:
    '''simple docstring'''
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , 'decode' ):
                output = output.decode('utf-8' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 339 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput( BaseOutput ):
    """
    Output class for the scheduler's step function output.
    """

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler( SchedulerMixin , ConfigMixin ):
    order = 2
@register_to_config
    def __init__( self, sigma_min = 0.02, sigma_max = 100, s_noise = 1.007, s_churn = 80, s_min = 0.05, s_max = 50, ):
        """simple docstring"""
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input( self, sample, timestep = None ):
        """simple docstring"""
        return sample

    def set_timesteps( self, num_inference_steps, device = None ):
        """simple docstring"""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps )[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device )

    def add_noise_to_input( self, sample, sigma, generator = None ):
        """simple docstring"""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator ).to(sample.device )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step( self, model_output, sigma_hat, sigma_prev, sample_hat, return_dict = True, ):
        """simple docstring"""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample )

    def step_correct( self, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict = True, ):
        """simple docstring"""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample )

    def add_noise( self, original_samples, noise, timesteps ):
        """simple docstring"""
        raise NotImplementedError()
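# Hedged sketch of the stochastic sampling loop this scheduler implements
# (Algorithm 2 in Karras et al. 2022); `unet` maps (sample, sigma) to a model
# output and all names below are illustrative:
#   scheduler.set_timesteps(50)
#   sample = scheduler.init_noise_sigma * torch.randn(shape)
#   for i, t in enumerate(scheduler.timesteps):
#       sigma = scheduler.schedule[i]
#       sigma_prev = scheduler.schedule[i + 1] if i + 1 < len(scheduler.schedule) else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       model_output = unet(sample_hat, sigma_hat)
#       sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample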
| 359 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class A ( __UpperCAmelCase ):
@staticmethod
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
raise NotImplementedError()
| 167 | 0 |
def heaps(arr ) -> list:
    """Return all permutations of arr via Heap's algorithm."""
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []

    def generate(k , arr ):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return
        generate(k - 1 , arr )
        for i in range(k - 1 ):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1 , arr )

    generate(len(arr ) , arr )
    return res
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
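    # Example: heaps([1, 2, 3]) yields all 3! = 6 orderings, in this order:
    # [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]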
| 71 |
import math
class Graph:
    def __init__( self , n=0 ):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j
        for i in range(0 , n ):
            self.dp[i][i] = 0  # distance from a node to itself is zero

    def add_edge( self , u , v , w ):
        self.dp[u][v] = w

    def floyd_warshall( self ):
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )

    def show_min( self , u , v ):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
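    # With the edges above, floyd_warshall computes e.g.
    #   graph.show_min(1, 4) == 11   (path 1 -> 3 -> 4)
    #   graph.show_min(0, 3) == 16   (path 0 -> 2 -> 3)
    # show_min returns the distance; wrap it in print() to display it.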
| 94 | 0 |
def solution(limit: int = 2_81_23 ) -> int:
    """Sum all positive integers that cannot be written as the sum of two abundant numbers."""
    sum_divs = [1] * (limit + 1)  # 1 divides every n > 1; sum_divs[n] holds the proper-divisor sum
    for i in range(2 , int(limit**0.5 ) + 1 ):
        sum_divs[i * i] += i
        for k in range(i + 1 , limit // i + 1 ):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1 , limit + 1 ):
        if sum_divs[n] > n:
            abundants.add(n )
        if not any((n - a in abundants) for a in abundants ):
            res += n
    return res
if __name__ == "__main__":
print(solution())
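    # For the default limit of 28123 this is Project Euler problem 23; the
    # returned sum is 4179871.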
| 204 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
@slow
    def test_small_integration_test( self ):
        """simple docstring"""
        model = AutoModelForSeq2SeqLM.from_pretrained("""google/mt5-small""" , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained("""google/mt5-small""" )
        input_ids = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids
        labels = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.91_27
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 204 | 1 |
"""Fine-tune library models for masked language modeling (BERT-style) with whole word masking on a text file or dataset."""

import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from datasets import Dataset, load_dataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_FOR_MASKED_LM_MAPPING,
    AutoConfig,
    AutoModelForMaskedLM,
    AutoTokenizer,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
    # See all possible arguments in src/transformers/training_args.py, or by
    # passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
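# Example invocation (illustrative; the file paths are placeholders):
#
#   python run_mlm_wwm.py \
#       --model_name_or_path roberta-base \
#       --train_file path/to/train.txt \
#       --validation_file path/to/validation.txt \
#       --do_train \
#       --do_eval \
#       --output_dir /tmp/test-mlm-wwm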
| 93 | """simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase( a , a , a , a="attention" ):
__a = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"]
__a = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"]
return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from the T5X checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__:Tuple = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
SCREAMING_SNAKE_CASE__:Tuple = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
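# Example invocation (illustrative; the paths are placeholders):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output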
| 261 | 0 |
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply the electrical impedance formula Z**2 = R**2 + X**2: given any two of
    resistance (R), reactance (X) and impedance (Z), with the unknown passed as 0,
    return the third in a labelled dict.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
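# Example (illustrative), based on the 3-4-5 right triangle of the formula:
#
#   >>> electrical_impedance(3, 4, 0)
#   {'impedance': 5.0}
#   >>> electrical_impedance(0, 4, 5)
#   {'resistance': 3.0}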
| 131 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
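# Note (illustrative): the _LazyModule indirection above defers the heavy torch
# imports until an attribute is actually accessed, e.g.
#
#   >>> from transformers import LukeModel  # only now is modeling_luke imported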
| 131 | 1 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase__ ( __a, __a, unittest.TestCase ):
_UpperCAmelCase :Union[str, Any] = StableDiffusionDiffEditPipeline
_UpperCAmelCase :int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
_UpperCAmelCase :Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
_UpperCAmelCase :int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_UpperCAmelCase :List[str] = frozenset([] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents

        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents

        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
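# Note (illustrative): DiffEdit runs in three stages, all visible in the tests above:
# generate_mask() diffs noise predictions under the source and target prompts to find
# the editable region, invert() DDIM-inverts the input image into latents, and the
# pipeline call then inpaints only the masked region while regenerating from those latents.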
| 144 |
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
A_ = re.compile(r'''\s+''')
def UpperCAmelCase__ (snake_case__ : Optional[int] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(snake_case__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def line_stats(example):
    """Calculate the mean and max line length of the file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes, and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if the file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if the file is a configuration file or a unit test by:
    1. looking for keywords in the first few lines of the file,
    2. counting occurrences of 'config' and 'test' relative to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if the file uses the '=' symbol fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function so the cache is filled only once."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter the dataset with the heuristics computed in preprocess()."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip and remove the uncompressed original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
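# Example invocation (illustrative; all flags come from PreprocessingArguments and
# the dataset/tokenizer names are placeholders):
#
#   python preprocessing.py \
#       --dataset_name transformersbook/codeparrot \
#       --tokenizer_dir codeparrot/codeparrot \
#       --output_dir ./codeparrot-clean \
#       --near_deduplication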
| 64 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    # The stub names below are reconstructed from the skip reasons; each one
    # overrides the corresponding common test from the mixins.
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_same_dtype(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
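# Usage sketch (illustrative): the equivalence test above relies on the AutoBackbone
# API routing to TimmBackbone when use_timm_backbone=True:
#
#   >>> from transformers import AutoBackbone
#   >>> backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=(1, 2, 3))
#   >>> backbone.channels  # one entry per requested stage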
| 361 |
"""simple docstring"""
UpperCamelCase : Union[str, Any] = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    # Return True if the sink t is reachable from the source s in the residual graph.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph, source, sink):
    """Run Ford-Fulkerson, then report the edges saturated by the maximum flow."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum capacity along the selected augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
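# Note (illustrative): by the max-flow min-cut theorem, once Ford-Fulkerson
# terminates every edge crossing a minimum cut is saturated (zero residual
# capacity); the final double loop collects all saturated edges as min-cut
# candidates. For the 6-node test graph above the maximum flow value is 23.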
| 263 | 0 |
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}


class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__="replace" , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__=False , lowerCamelCase__=True , **lowerCamelCase__ , ) -> Dict:
'''simple docstring'''
super().__init__(
lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , )
lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowerCamelCase__ ) != add_prefix_space:
lowercase__ = getattr(lowerCamelCase__ , pre_tok_state.pop("""type""" ) )
lowercase__ = add_prefix_space
lowercase__ = pre_tok_class(**lowerCamelCase__ )
lowercase__ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase__ = """post_processor"""
lowercase__ = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
if tokenizer_component_instance:
lowercase__ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase__ = tuple(state["""sep"""] )
if "cls" in state:
lowercase__ = tuple(state["""cls"""] )
lowercase__ = False
if state.get("""add_prefix_space""" , lowerCamelCase__ ) != add_prefix_space:
lowercase__ = add_prefix_space
lowercase__ = True
if state.get("""trim_offsets""" , lowerCamelCase__ ) != trim_offsets:
lowercase__ = trim_offsets
lowercase__ = True
if changes_to_apply:
lowercase__ = getattr(lowerCamelCase__ , state.pop("""type""" ) )
lowercase__ = component_class(**lowerCamelCase__ )
setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ )
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
'''simple docstring'''
is_split_into_words = kwargs.get("""is_split_into_words""" , False )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*args , **kwargs )
def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
'''simple docstring'''
is_split_into_words = kwargs.get("""is_split_into_words""" , False )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*args , **kwargs )
def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
'''simple docstring'''
files = self._tokenizer.model.save(save_directory , name=filename_prefix )
return tuple(files )
def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ) -> List[int]:
'''simple docstring'''
output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_b is None:
return output
return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
'''simple docstring'''
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
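# Illustrative usage (a sketch, not part of the original module; it assumes the
# checkpoint above is reachable and the class is exported as MvpTokenizerFast):
# tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
# tok.build_inputs_with_special_tokens([8, 9])      # -> [bos_id, 8, 9, eos_id]
# tok.create_token_type_ids_from_sequences([8, 9])  # -> [0, 0, 0, 0]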
| 164 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
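# Import-time behaviour of the lazy pattern above (a sketch): importing this
# package only builds the _LazyModule shim; the sentencepiece-backed module is
# loaded on first attribute access, e.g.
# from transformers.models.gpt_sw3 import GPTSw3Tokenizer  # triggers the real import
# (the package path is illustrative).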
| 164 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class SeqaSeqTrainerTester( TestCasePlus ):
'''simple docstring'''
@slow
@require_torch
def test_finetune_bertabert( self ) -> Optional[int]:
'''simple docstring'''
bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased' )
bertabert.config.vocab_size = bertabert.config.encoder.vocab_size
bertabert.config.eos_token_id = tokenizer.sep_token_id
bertabert.config.decoder_start_token_id = tokenizer.cls_token_id
bertabert.config.max_length = 1_2_8
train_dataset = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
val_dataset = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
train_dataset = train_dataset.select(range(3_2 ) )
val_dataset = val_dataset.select(range(1_6 ) )
batch_size = 4
def _map_to_encoder_decoder_inputs(lowercase_ ):
# Tokenizer will automatically set [BOS] <text> [EOS]
inputs = tokenizer(batch['article'] , padding='max_length' , truncation=True , max_length=5_1_2 )
outputs = tokenizer(batch['highlights'] , padding='max_length' , truncation=True , max_length=1_2_8 )
batch['input_ids'] = inputs.input_ids
batch['attention_mask'] = inputs.attention_mask
batch['decoder_input_ids'] = outputs.input_ids
batch['labels'] = outputs.input_ids.copy()
batch['labels'] = [
[-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
batch['decoder_attention_mask'] = outputs.attention_mask
assert all(len(x ) == 5_1_2 for x in inputs.input_ids )
assert all(len(x ) == 1_2_8 for x in outputs.input_ids )
return batch
def _compute_metrics(lowercase_ ):
labels_ids = pred.label_ids
pred_ids = pred.predictions
# all unnecessary tokens are removed
pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
return {"accuracy": accuracy}
# map train dataset
train_dataset = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
val_dataset = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
output_dir = self.get_auto_remove_tmp_dir()
training_args = SeqaSeqTrainingArguments(
output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy='steps' , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
trainer = SeqaSeqTrainer(
model=bertabert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
# start training
trainer.train()
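# Why pad tokens become -100 in _map_to_encoder_decoder_inputs (a sketch of the
# torch semantics, not part of the test): CrossEntropyLoss(ignore_index=-100)
# skips those positions, so e.g. labels [101, 7592, 102, 0, 0] become
# [101, 7592, 102, -100, -100] and padding never contributes to the loss.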
| 359 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowerCamelCase_ = """pytorch_model.bin"""
lowerCamelCase_ = """pytorch_model.bin.index.json"""
lowerCamelCase_ = """adapter_config.json"""
lowerCamelCase_ = """adapter_model.bin"""
lowerCamelCase_ = """adapter_model.safetensors"""
lowerCamelCase_ = """tf_model.h5"""
lowerCamelCase_ = """tf_model.h5.index.json"""
lowerCamelCase_ = """model.ckpt"""
lowerCamelCase_ = """flax_model.msgpack"""
lowerCamelCase_ = """flax_model.msgpack.index.json"""
lowerCamelCase_ = """model.safetensors"""
lowerCamelCase_ = """model.safetensors.index.json"""
lowerCamelCase_ = """config.json"""
lowerCamelCase_ = """preprocessor_config.json"""
lowerCamelCase_ = FEATURE_EXTRACTOR_NAME
lowerCamelCase_ = """generation_config.json"""
lowerCamelCase_ = """modelcard.json"""
lowerCamelCase_ = """▁"""
lowerCamelCase_ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowerCamelCase_ = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowerCamelCase_ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowerCamelCase_ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version( min_version ):
if version.parse(__version__ ) < version.parse(min_version ):
if "dev" in min_version:
error_message = (
'This example requires a source install from HuggingFace Transformers (see '
'`https://huggingface.co/docs/transformers/installation#install-from-source`),'
)
else:
error_message = F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
'versions of HuggingFace Transformers.' )
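# Example call (a sketch; the version string below is illustrative only):
# check_min_version("4.21.0.dev0")  # raises ImportError when __version__ is older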
| 14 | 0 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.int8,
'tensor(uint8)': np.uint8,
'tensor(int16)': np.int16,
'tensor(uint16)': np.uint16,
'tensor(int32)': np.int32,
'tensor(uint32)': np.uint32,
'tensor(int64)': np.int64,
'tensor(uint64)': np.uint64,
'tensor(float16)': np.float16,
'tensor(float)': np.float32,
'tensor(double)': np.float64,
}
class OnnxRuntimeModel :
def __init__(self , model=None , **kwargs ):
"""simple docstring"""
logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
self.model = model
self.model_save_dir = kwargs.get("model_save_dir" , None )
self.latest_model_name = kwargs.get("latest_model_name" , ONNX_WEIGHTS_NAME )
def __call__(self , **kwargs ):
"""simple docstring"""
inputs = {k: np.array(v ) for k, v in kwargs.items()}
return self.model.run(None , inputs )
@staticmethod
def load_model(path : Union[str, Path] , provider=None , sess_options=None ):
"""simple docstring"""
if provider is None:
logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
provider = "CPUExecutionProvider"
return ort.InferenceSession(path , providers=[provider] , sess_options=sess_options )
def _save_pretrained(self , save_directory : Union[str, Path] , file_name : Optional[str] = None , **kwargs ):
"""simple docstring"""
model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
src_path = self.model_save_dir.joinpath(self.latest_model_name )
dst_path = Path(save_directory ).joinpath(model_file_name )
try:
shutil.copyfile(src_path , dst_path )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
if src_path.exists():
dst_path = Path(save_directory ).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
try:
shutil.copyfile(src_path , dst_path )
except shutil.SameFileError:
pass
def save_pretrained(self , save_directory : Union[str, os.PathLike] , **kwargs , ):
"""simple docstring"""
if os.path.isfile(save_directory ):
logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(save_directory , exist_ok=True )
# saving model weights/files
self._save_pretrained(save_directory , **kwargs )
@classmethod
def _from_pretrained(cls , model_id : Union[str, Path] , use_auth_token : Optional[Union[bool, str, None]] = None , revision : Optional[Union[str, None]] = None , force_download : bool = False , cache_dir : Optional[str] = None , file_name : Optional[str] = None , provider : Optional[str] = None , sess_options : Optional["ort.SessionOptions"] = None , **kwargs , ):
"""simple docstring"""
model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(model_id ):
model = OnnxRuntimeModel.load_model(
os.path.join(model_id , model_file_name ) , provider=provider , sess_options=sess_options )
kwargs["model_save_dir"] = Path(model_id )
# load model from hub
else:
# download model
model_cache_path = hf_hub_download(
repo_id=model_id , filename=model_file_name , use_auth_token=use_auth_token , revision=revision , cache_dir=cache_dir , force_download=force_download , )
kwargs["model_save_dir"] = Path(model_cache_path ).parent
kwargs["latest_model_name"] = Path(model_cache_path ).name
model = OnnxRuntimeModel.load_model(model_cache_path , provider=provider , sess_options=sess_options )
return cls(model=model , **kwargs )
@classmethod
def from_pretrained(cls , model_id : Union[str, Path] , force_download : bool = True , cache_dir : Optional[str] = None , use_auth_token : Optional[str] = None , **model_kwargs , ):
"""simple docstring"""
revision = None
if len(str(model_id ).split("@" ) ) == 2:
model_id , revision = model_id.split("@" )
return cls._from_pretrained(
model_id=model_id , revision=revision , cache_dir=cache_dir , force_download=force_download , use_auth_token=use_auth_token , **model_kwargs , )
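# Illustrative usage (a sketch; the repo id is hypothetical and the provider
# name follows onnxruntime's conventions):
# model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model", provider="CPUExecutionProvider")
# outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))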
| 65 |
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
args = parser.parse_args()
if args.model_type == "bert":
model = BertForMaskedLM.from_pretrained(args.model_name)
prefix = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
state_dict = model.state_dict()
compressed_sd = {}
for w in ["word_embeddings", "position_embeddings"]:
_a = state_dict[F"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_a = state_dict[F"""{prefix}.embeddings.LayerNorm.{w}"""]
std_idx = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_a = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_a = state_dict["""cls.predictions.decoder.weight"""]
_a = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_a = state_dict[F"""cls.predictions.transform.dense.{w}"""]
_a = state_dict[F"""cls.predictions.transform.LayerNorm.{w}"""]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
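# Sketch of the layer selection performed above (illustrative; the student
# state-dict keys are not recoverable from this snippet): teacher layers
# [0, 2, 4, 7, 9, 11] are copied to student layers [0, 1, 2, 3, 4, 5],
# i.e. std_idx enumerates the kept teacher layers in order.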
| 194 | 0 |
from __future__ import annotations
def UpperCAmelCase ( voltage , current , resistance ):
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 352 |
from collections.abc import Generator
from math import sin
def to_little_endian ( string_aa ):
if len(string_aa ) != 32:
raise ValueError("Input must be of length 32" )
little_endian = B""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def reformat_hex ( i ):
if i < 0:
raise ValueError("Input must be non-negative" )
hex_rep = format(i , "08x" )[-8:]
little_endian_hex = B""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def preprocess ( message ):
bit_string = B""
for char in message:
bit_string += format(char , "08b" ).encode("utf-8" )
start_len = format(len(bit_string ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(bit_string ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
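# Padding invariant (a sketch): the returned bit string always has a length
# that is a multiple of 512, e.g. len(preprocess(b"")) == 512.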
def get_block_words ( bit_string ):
if len(bit_string ) % 512 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(bit_string ) , 512 ):
block = bit_string[pos : pos + 512]
block_words = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def not_aa ( i ):
if i < 0:
raise ValueError("Input must be non-negative" )
i_str = format(i , "032b" )
new_str = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(new_str , 2 )
def sum_aa ( a , b ):
return (a + b) % 2**32
def left_rotate_aa ( i , shift ):
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
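# Examples (a sketch): bits wrap around the 32-bit boundary, so
# left_rotate_aa(0x80000000, 1) == 1 and left_rotate_aa(1, 31) == 0x80000000.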
def md5_me ( message ):
bit_string = preprocess(message )
added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
aa = 0X67452301
ba = 0Xefcdab89
ca = 0X98badcfe
da = 0X10325476
shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(bit_string ):
a = aa
b = ba
c = ca
d = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
f = d ^ (b & (c ^ d))
g = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
f = c ^ (d & (b ^ c))
g = (5 * i + 1) % 16
elif i <= 47:
f = b ^ c ^ d
g = (3 * i + 5) % 16
else:
f = c ^ (b | not_aa(d ))
g = (7 * i) % 16
f = (f + a + added_consts[i] + block_words[g]) % 2**32
a = d
d = c
c = b
b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )
# Add hashed chunk to running total
aa = sum_aa(aa , a )
ba = sum_aa(ba , b )
ca = sum_aa(ca , c )
da = sum_aa(da , d )
digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
return digest
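# Expected behaviour (a sketch; with the name fixes above the function should
# reproduce the standard MD5 test vector):
# md5_me(b"hello") == b"5d41402abc4b2a76b9719d911017c592"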
if __name__ == "__main__":
import doctest
doctest.testmod()
| 256 | 0 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata ( class_info_file , repo_path="shi-labs/oneformer_demo" ):
"""simple docstring"""
with open(hf_hub_download(repo_path , class_info_file , repo_type='dataset' ) , 'r' ) as f:
class_info = json.load(f )
metadata = {}
class_names = []
thing_ids = []
for key, info in class_info.items():
metadata[key] = info['name']
class_names.append(info['name'] )
if info["isthing"]:
thing_ids.append(int(key ) )
metadata['thing_ids'] = thing_ids
metadata['class_names'] = class_names
return metadata
class OneFormerImageProcessorTester( unittest.TestCase ):
"""simple docstring"""
def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , size=None , do_resize=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , num_labels=10 , do_reduce_labels=False , ignore_index=255 , repo_path="shi-labs/oneformer_demo" , class_info_file="ade20k_panoptic.json" , num_text=10 , ):
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = {'shortest_edge': 32, 'longest_edge': 1_333} if size is None else size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.class_info_file = class_info_file
self.metadata = prepare_metadata(class_info_file , repo_path )
self.num_text = num_text
self.repo_path = repo_path
# for the post_process_functions
self.batch_size = 2
self.num_queries = 10
self.num_classes = 10
self.height = 3
self.width = 4
self.num_labels = num_labels
self.do_reduce_labels = do_reduce_labels
self.ignore_index = ignore_index
def prepare_image_processor_dict( self ) -> List[str]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def get_expected_values( self , image_inputs , batched=False ):
'''simple docstring'''
if not batched:
image = image_inputs[0]
if isinstance(image , Image.Image ):
w , h = image.size
else:
h , w = image.shape[1], image.shape[2]
if w < h:
expected_height = int(self.size['shortest_edge'] * h / w )
expected_width = self.size['shortest_edge']
elif w > h:
expected_height = self.size['shortest_edge']
expected_width = int(self.size['shortest_edge'] * w / h )
else:
expected_height = self.size['shortest_edge']
expected_width = self.size['shortest_edge']
else:
expected_values = []
for image in image_inputs:
expected_height , expected_width = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
expected_height = max(expected_values , key=lambda item : item[0] )[0]
expected_width = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
def get_fake_oneformer_outputs( self ) -> Union[str, Any]:
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class OneFormerImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
feature_extraction_class = image_processing_class
def setUp( self ) -> Any:
'''simple docstring'''
self.image_processing_tester = OneFormerImageProcessorTester(self )
@property
def image_processor_dict( self ) -> List[str]:
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def test_image_processor_properties( self ) -> List[Any]:
'''simple docstring'''
image_processor = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(image_processor , 'image_mean' ) )
self.assertTrue(hasattr(image_processor , 'image_std' ) )
self.assertTrue(hasattr(image_processor , 'do_normalize' ) )
self.assertTrue(hasattr(image_processor , 'do_resize' ) )
self.assertTrue(hasattr(image_processor , 'size' ) )
self.assertTrue(hasattr(image_processor , 'ignore_index' ) )
self.assertTrue(hasattr(image_processor , 'class_info_file' ) )
self.assertTrue(hasattr(image_processor , 'num_text' ) )
self.assertTrue(hasattr(image_processor , 'repo_path' ) )
self.assertTrue(hasattr(image_processor , 'metadata' ) )
self.assertTrue(hasattr(image_processor , 'do_reduce_labels' ) )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
pass
def test_call_pil( self ) -> Dict:
'''simple docstring'''
# Initialize image_processor
image_processor = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False )
for image in image_inputs:
self.assertIsInstance(image , Image.Image )
# Test not batched input
encoded_images = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
expected_height , expected_width = self.image_processing_tester.get_expected_values(image_inputs )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
expected_height , expected_width = self.image_processing_tester.get_expected_values(image_inputs , batched=True )
encoded_images = image_processor(
image_inputs , ['semantic'] * len(image_inputs ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def test_call_numpy( self ) -> Any:
'''simple docstring'''
# Initialize image_processor
image_processor = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False , numpify=True )
for image in image_inputs:
self.assertIsInstance(image , np.ndarray )
# Test not batched input
encoded_images = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
expected_height , expected_width = self.image_processing_tester.get_expected_values(image_inputs )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
expected_height , expected_width = self.image_processing_tester.get_expected_values(image_inputs , batched=True )
encoded_images = image_processor(
image_inputs , ['semantic'] * len(image_inputs ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def test_call_pytorch( self ) -> List[str]:
'''simple docstring'''
# Initialize image_processor
image_processor = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False , torchify=True )
for image in image_inputs:
self.assertIsInstance(image , torch.Tensor )
# Test not batched input
encoded_images = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
expected_height , expected_width = self.image_processing_tester.get_expected_values(image_inputs )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
expected_height , expected_width = self.image_processing_tester.get_expected_values(image_inputs , batched=True )
encoded_images = image_processor(
image_inputs , ['semantic'] * len(image_inputs ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__="np" ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
__lowerCamelCase = self.image_processing_tester.num_labels
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCamelCase__ )
if with_segmentation_maps:
__lowerCamelCase = num_labels
if is_instance_map:
__lowerCamelCase = list(range(lowerCamelCase__ ) ) * 2
__lowerCamelCase = dict(enumerate(lowerCamelCase__ ) )
__lowerCamelCase = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
__lowerCamelCase = [Image.fromarray(lowerCamelCase__ ) for annotation in annotations]
__lowerCamelCase = image_processor(
lowerCamelCase__ , ['semantic'] * len(lowerCamelCase__ ) , lowerCamelCase__ , return_tensors='pt' , instance_id_to_semantic_id=lowerCamelCase__ , pad_and_return_pixel_mask=lowerCamelCase__ , )
return inputs
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def test_call_with_segmentation_maps( self ) -> Any:
'''simple docstring'''
def common(is_instance_map=False , segmentation_type=None ):
inputs = self.comm_get_image_processor_inputs(
with_segmentation_maps=True , is_instance_map=is_instance_map , segmentation_type=segmentation_type )
mask_labels = inputs['mask_labels']
class_labels = inputs['class_labels']
pixel_values = inputs['pixel_values']
text_inputs = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(mask_labels , class_labels , text_inputs ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(text_input ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=True )
common(is_instance_map=False , segmentation_type='pil' )
common(is_instance_map=True , segmentation_type='pil' )
def test_binary_mask_to_rle( self ) -> Union[str, Any]:
'''simple docstring'''
fake_binary_mask = np.zeros((20, 50) )
fake_binary_mask[0, 20:] = 1
fake_binary_mask[1, :15] = 1
fake_binary_mask[5, :10] = 1
rle = binary_mask_to_rle(fake_binary_mask )
self.assertEqual(len(rle ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def test_post_process_semantic_segmentation( self ) -> str:
'''simple docstring'''
image_processor = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
outputs = self.image_processing_tester.get_fake_oneformer_outputs()
segmentation = image_processor.post_process_semantic_segmentation(outputs )
self.assertEqual(len(segmentation ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
segmentation = image_processor.post_process_semantic_segmentation(outputs , target_sizes=target_sizes )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def test_post_process_instance_segmentation( self ) -> List[str]:
'''simple docstring'''
image_processor = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
outputs = self.image_processing_tester.get_fake_oneformer_outputs()
segmentation = image_processor.post_process_instance_segmentation(outputs , threshold=0 )
self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , list )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def test_post_process_panoptic_segmentation( self ) -> Optional[int]:
'''simple docstring'''
image_processor = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
outputs = self.image_processing_tester.get_fake_oneformer_outputs()
segmentation = image_processor.post_process_panoptic_segmentation(outputs , threshold=0 )
self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , list )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 90 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt-neox-20b": 20_48,
}
class GPTNeoXTokenizerFast( PreTrainedTokenizerFast ):
"""simple docstring"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
'''simple docstring'''
super().__init__(
vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
pre_tok_state['add_prefix_space'] = add_prefix_space
self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
self.add_prefix_space = add_prefix_space
def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
'''simple docstring'''
files = self._tokenizer.model.save(save_directory , name=filename_prefix )
return tuple(files )
def _build_conversation_input_ids( self , conversation ) -> List[int]:
'''simple docstring'''
input_ids = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
if len(input_ids ) > self.model_max_length:
input_ids = input_ids[-self.model_max_length :]
return input_ids
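# Illustrative flattening (a sketch; Conversation comes from
# transformers.pipelines.conversational): a two-turn exchange is encoded as
# encode(user_text) + [eos_id] + encode(bot_text) + [eos_id], then truncated
# from the left to model_max_length if needed.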
| 90 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
__lowerCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
def split_text (text : str , n : int=1_0_0 , character : str=" " ):
"""simple docstring"""
_a : list = text.split(character )
return [character.join(_a[i : i + n] ).strip() for i in range(0 , len(_a ) , n )]
def split_documents (documents : dict ):
"""simple docstring"""
titles , texts = [], []
for title, text in zip(documents['title'] , documents['text'] ):
if text is not None:
for passage in split_text(text ):
titles.append(title if title is not None else '' )
texts.append(passage )
return {"title": titles, "text": texts}
def embed (documents : dict , ctx_encoder : DPRContextEncoder , ctx_tokenizer : DPRContextEncoderTokenizerFast ):
"""simple docstring"""
input_ids = ctx_tokenizer(
documents['title'] , documents['text'] , truncation=True , padding='longest' , return_tensors='pt' )['input_ids']
embeddings = ctx_encoder(input_ids.to(device=device ) , return_dict=True ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def UpperCAmelCase_ (__a : "RagExampleArguments" , __a : "ProcessingArguments" , __a : "IndexHnswArguments" , ):
"""simple docstring"""
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
dataset = load_dataset(
'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
dataset = dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc )
# And compute the embeddings
ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=device )
ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
new_features = Features(
{'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space
dataset = dataset.map(
partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer ) , batched=True , batch_size=processing_args.batch_size , features=new_features , )
# And finally save your dataset
passages_path = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
dataset.save_to_disk(passages_path )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
index = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('embeddings' , custom_index=index )
# And save the index
index_path = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
dataset.get_index('embeddings' ).save(index_path )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments :
"""simple docstring"""
csv_path : str = field(
default=str(Path(__file__ ).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''' ) , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , )
question : Optional[str] = field(
default=None , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , )
rag_model_name : str = field(
default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , )
dpr_ctx_encoder_model_name : str = field(
default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={
'''help''': (
'''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'''
''' \'facebook/dpr-ctx_encoder-multiset-base\''''
)
} , )
output_dir : Optional[str] = field(
default=str(Path(__file__ ).parent / '''test_run''' / '''dummy-kb''' ) , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , )
@dataclass
class ProcessingArguments :
"""simple docstring"""
num_proc : Optional[int] = field(
default=None , metadata={
'''help''': '''The number of processes to use to split the documents into passages. Default is single process.'''
} , )
batch_size : int = field(
default=16 , metadata={
'''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.'''
} , )
@dataclass
class IndexHnswArguments :
"""simple docstring"""
d : int = field(
default=768 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , )
m : int = field(
default=128 , metadata={
'''help''': (
'''The number of bi-directional links created for every new element during the HNSW index construction.'''
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
rag_example_args , processing_args , index_hnsw_args = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
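# Input format expected by main() (a sketch): a tab-separated csv with a
# "title" and a "text" column per row, i.e.
# <document title>\t<document text>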
| 5 |
'''simple docstring'''
def UpperCAmelCase_ (__a : list[int] , __b : list[int] ):
"""simple docstring"""
if not len(__a ) == len(__b ) == 3:
raise ValueError('Please enter a valid equation.' )
if __a[0] == __a[1] == __b[0] == __b[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
aa , ba , ca = __a
ab , bb , cb = __b
# Calculate the determinants of the matrices
determinant = aa * bb - ab * ba
determinant_x = ca * bb - cb * ba
determinant_y = aa * cb - ab * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
_a : int = determinant_x / determinant
_a : List[str] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
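# Worked example (a sketch): the system x + y = 3, x - y = 1 is encoded as
# UpperCAmelCase_([1, 1, 3], [1, -1, 1]) and returns (2.0, 1.0).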
| 5 | 1 |
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode :
"""simple docstring"""
def __init__( self , data ):
self.data = data
self.left = None
self.right = None
def build_tree ( ):
print('\n********Press N to stop entering at any point of time********\n' )
check = input('Enter the value of the root node: ' ).strip().lower()
q = queue.Queue()
tree_node = TreeNode(int(check ) )
q.put(tree_node )
while not q.empty():
node_found = q.get()
msg = f'''Enter the left node of {node_found.data}: '''
check = input(msg ).strip().lower() or 'n'
if check == "n":
return tree_node
left_node = TreeNode(int(check ) )
node_found.left = left_node
q.put(left_node )
msg = f'''Enter the right node of {node_found.data}: '''
check = input(msg ).strip().lower() or 'n'
if check == "n":
return tree_node
right_node = TreeNode(int(check ) )
node_found.right = right_node
q.put(right_node )
raise
def pre_order ( node ):
if not isinstance(node , TreeNode ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def in_order ( node ):
if not isinstance(node , TreeNode ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def post_order ( node ):
if not isinstance(node , TreeNode ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def level_order ( node ):
if not isinstance(node , TreeNode ) or not node:
return
q = queue.Queue()
q.put(node )
while not q.empty():
node_dequeued = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def level_order_actual ( node ):
if not isinstance(node , TreeNode ) or not node:
return
q = queue.Queue()
q.put(node )
while not q.empty():
list_ = []
while not q.empty():
node_dequeued = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(node )
def pre_order_iter ( node ):
if not isinstance(node , TreeNode ) or not node:
return
stack = []
n = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(n )
n = n.left
# end of while means current node doesn't have left child
n = stack.pop()
# start to traverse its right child
n = n.right
def in_order_iter ( node ):
if not isinstance(node , TreeNode ) or not node:
return
stack = []
n = node
while n or stack:
while n:
stack.append(n )
n = n.left
n = stack.pop()
print(n.data , end=',' )
n = n.right
def post_order_iter ( node ):
if not isinstance(node , TreeNode ) or not node:
return
stacka , stackb = [], []
n = node
stacka.append(n )
while stacka: # to find the reversed order of post order, store it in stack2
n = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stackb.append(n )
while stackb: # pop up from stack2 will be the post order
print(stackb.pop().data , end=',' )
def a_ ( lowerCamelCase = "" , lowerCamelCase=5_0 , lowerCamelCase="*" ):
if not s:
return "\n" + width * char
UpperCAmelCase__ , UpperCAmelCase__ = divmod(width - len(lowerCamelCase ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
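# Expected traversal orders for the tree 1 -> (2, 3) (a sketch):
# pre_order: 1,2,3  in_order: 2,1,3  post_order: 2,3,1  level_order: 1,2,3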
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
node : TreeNode = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
| 98 | """simple docstring"""
import os
import sys
import unittest
lowerCAmelCase__ : Tuple = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
BLIP_TEST_FILE = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def test_get_test_to_tester_mapping( self ):
bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE )
blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE )
EXPECTED_BERT_MAPPING = {'BertModelTest': 'BertModelTester'}
EXPECTED_BLIP_MAPPING = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(bert_test_tester_mapping ) ,EXPECTED_BERT_MAPPING )
self.assertEqual(get_test_info.to_json(blip_test_tester_mapping ) ,EXPECTED_BLIP_MAPPING )
def test_get_model_to_test_mapping( self ):
bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE )
blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE )
EXPECTED_BERT_MAPPING = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
EXPECTED_BLIP_MAPPING = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(bert_model_test_mapping ) ,EXPECTED_BERT_MAPPING )
self.assertEqual(get_test_info.to_json(blip_model_test_mapping ) ,EXPECTED_BLIP_MAPPING )
def test_get_model_to_tester_mapping( self ):
bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE )
blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE )
EXPECTED_BERT_MAPPING = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
EXPECTED_BLIP_MAPPING = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(bert_model_tester_mapping ) ,EXPECTED_BERT_MAPPING )
self.assertEqual(get_test_info.to_json(blip_model_tester_mapping ) ,EXPECTED_BLIP_MAPPING )
| 98 | 1 |
def _a ( numa: int , numb: int ) -> bool:
'''simple docstring'''
return numa ^ numb < 0
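# Examples (a sketch): the sign bit of numa ^ numb is set only when the signs
# differ, so _a(1, -1) is True while _a(-4, -7) is False.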
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360 |
from __future__ import annotations
from math import pi, sqrt
def _a ( inductance: float , capacitance: float ) -> tuple:
'''simple docstring'''
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 250 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Tuple = '''unispeech'''
def __init__( self ,__UpperCAmelCase=32 ,__UpperCAmelCase=768 ,__UpperCAmelCase=12 ,__UpperCAmelCase=12 ,__UpperCAmelCase=3072 ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=1E-5 ,__UpperCAmelCase="group" ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) ,__UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) ,__UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) ,__UpperCAmelCase=False ,__UpperCAmelCase=128 ,__UpperCAmelCase=16 ,__UpperCAmelCase=False ,__UpperCAmelCase=True ,__UpperCAmelCase=0.0_5 ,__UpperCAmelCase=10 ,__UpperCAmelCase=2 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=10 ,__UpperCAmelCase=0 ,__UpperCAmelCase=320 ,__UpperCAmelCase=2 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=100 ,__UpperCAmelCase=256 ,__UpperCAmelCase=256 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase="mean" ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=256 ,__UpperCAmelCase=80 ,__UpperCAmelCase=0 ,__UpperCAmelCase=1 ,__UpperCAmelCase=2 ,__UpperCAmelCase=0.5 ,**__UpperCAmelCase ,) -> List[Any]:
super().__init__(**__UpperCAmelCase ,pad_token_id=__UpperCAmelCase ,bos_token_id=__UpperCAmelCase ,eos_token_id=__UpperCAmelCase )
lowerCAmelCase__ : int = hidden_size
lowerCAmelCase__ : Optional[int] = feat_extract_norm
lowerCAmelCase__ : Tuple = feat_extract_activation
lowerCAmelCase__ : Optional[int] = list(__UpperCAmelCase )
lowerCAmelCase__ : Any = list(__UpperCAmelCase )
lowerCAmelCase__ : str = list(__UpperCAmelCase )
lowerCAmelCase__ : Dict = conv_bias
lowerCAmelCase__ : Optional[int] = num_conv_pos_embeddings
lowerCAmelCase__ : Optional[int] = num_conv_pos_embedding_groups
lowerCAmelCase__ : int = len(self.conv_dim )
lowerCAmelCase__ : str = num_hidden_layers
lowerCAmelCase__ : Optional[int] = intermediate_size
lowerCAmelCase__ : Optional[int] = hidden_act
lowerCAmelCase__ : Optional[int] = num_attention_heads
lowerCAmelCase__ : List[str] = hidden_dropout
lowerCAmelCase__ : Tuple = attention_dropout
lowerCAmelCase__ : Union[str, Any] = activation_dropout
lowerCAmelCase__ : List[Any] = feat_proj_dropout
lowerCAmelCase__ : Optional[Any] = final_dropout
lowerCAmelCase__ : Optional[Any] = layerdrop
lowerCAmelCase__ : Optional[Any] = layer_norm_eps
lowerCAmelCase__ : Optional[int] = initializer_range
lowerCAmelCase__ : str = num_ctc_classes
lowerCAmelCase__ : Union[str, Any] = vocab_size
lowerCAmelCase__ : Optional[int] = do_stable_layer_norm
lowerCAmelCase__ : Tuple = use_weighted_layer_sum
lowerCAmelCase__ : Tuple = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ : Any = apply_spec_augment
lowerCAmelCase__ : Dict = mask_time_prob
lowerCAmelCase__ : Dict = mask_time_length
lowerCAmelCase__ : Union[str, Any] = mask_time_min_masks
lowerCAmelCase__ : Optional[Any] = mask_feature_prob
lowerCAmelCase__ : List[str] = mask_feature_length
lowerCAmelCase__ : Optional[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase__ : str = num_codevectors_per_group
lowerCAmelCase__ : Optional[int] = num_codevector_groups
lowerCAmelCase__ : Dict = contrastive_logits_temperature
lowerCAmelCase__ : Tuple = feat_quantizer_dropout
lowerCAmelCase__ : Tuple = num_negatives
lowerCAmelCase__ : Union[str, Any] = codevector_dim
lowerCAmelCase__ : str = proj_codevector_dim
lowerCAmelCase__ : Optional[Any] = diversity_loss_weight
# ctc loss
lowerCAmelCase__ : str = ctc_loss_reduction
lowerCAmelCase__ : Tuple = ctc_zero_infinity
# pretraining loss
lowerCAmelCase__ : Union[str, Any] = replace_prob
@property
def UpperCAmelCase_ ( self ) -> Any:
return functools.reduce(operator.mul ,self.conv_stride ,1 )
| 37 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , )-> Dict:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def A__ ( self )-> List[str]:
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self )-> str:
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Any:
'''simple docstring'''
__UpperCamelCase = DistilBertModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = DistilBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Tuple:
'''simple docstring'''
__UpperCamelCase = DistilBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = DistilBertForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> str:
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = DistilBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> str:
'''simple docstring'''
__UpperCamelCase = self.num_choices
__UpperCamelCase = DistilBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
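        # Repeat each input along a new num_choices dimension: (batch_size, num_choices, seq_length).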
__UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self )-> Optional[int]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        ((config) , (input_ids) , (input_mask) , (sequence_labels) , (token_labels) , (choice_labels)) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
_snake_case = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
_snake_case = (
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case = True
_snake_case = True
_snake_case = True
_snake_case = True
def A__ ( self )-> Dict:
'''simple docstring'''
        self.model_tester = DistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
def A__ ( self )-> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> int:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Any:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
@slow
def A__ ( self )-> List[str]:
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCamelCase = DistilBertModel.from_pretrained(model_name )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@slow
@require_torch_gpu
def A__ ( self )-> List[str]:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , '''traced_model.pt''' ) )
                loaded = torch.jit.load(os.path.join(tmp , '''traced_model.pt''' ) , map_location=torch_device )
                loaded(inputs_dict['''input_ids'''].to(torch_device ) , inputs_dict['''attention_mask'''].to(torch_device ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self )-> Tuple:
'''simple docstring'''
        model = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 328 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: str ={
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class __A ( UpperCamelCase__ ):
a__ : str = """efficientnet"""
def __init__(self : str , __a : int = 3 , __a : int = 600 , __a : float = 2.0 , __a : float = 3.1 , __a : int = 8 , __a : List[int] = [3, 3, 5, 3, 5, 5, 3] , __a : List[int] = [32, 16, 24, 40, 80, 112, 192] , __a : List[int] = [16, 24, 40, 80, 112, 192, 320] , __a : List[int] = [] , __a : List[int] = [1, 2, 2, 2, 1, 2, 1] , __a : List[int] = [1, 2, 2, 3, 3, 4, 1] , __a : List[int] = [1, 6, 6, 6, 6, 6, 6] , __a : float = 0.25 , __a : str = "swish" , __a : int = 2560 , __a : str = "mean" , __a : float = 0.02 , __a : float = 0.0_01 , __a : float = 0.99 , __a : float = 0.5 , __a : float = 0.2 , **__a : List[Any] , ):
super().__init__(**__a )
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = width_coefficient
UpperCAmelCase_ = depth_coefficient
UpperCAmelCase_ = depth_divisor
UpperCAmelCase_ = kernel_sizes
UpperCAmelCase_ = in_channels
UpperCAmelCase_ = out_channels
UpperCAmelCase_ = depthwise_padding
UpperCAmelCase_ = strides
UpperCAmelCase_ = num_block_repeats
UpperCAmelCase_ = expand_ratios
UpperCAmelCase_ = squeeze_expansion_ratio
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dim
UpperCAmelCase_ = pooling_type
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = batch_norm_eps
UpperCAmelCase_ = batch_norm_momentum
UpperCAmelCase_ = dropout_rate
UpperCAmelCase_ = drop_connect_rate
UpperCAmelCase_ = sum(__a ) * 4
class __A ( UpperCamelCase__ ):
a__ : Any = version.parse("""1.11""" )
@property
def _lowercase (self : Optional[int] ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _lowercase (self : Optional[Any] ):
return 1E-5
| 106 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : List[Any] = MobileBertTokenizer
a__ : str = MobileBertTokenizerFast
a__ : List[str] = True
a__ : Dict = True
a__ : Optional[int] = filter_non_english
a__ : int = """google/mobilebert-uncased"""
def _lowercase (self : List[str] ):
super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        self.tokenizers_list = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def _lowercase (self : Tuple , __a : str ):
UpperCAmelCase_ = "UNwant\u00E9d,running"
UpperCAmelCase_ = "unwanted, running"
return input_text, output_text
def _lowercase (self : List[Any] ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(tokens , ["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
def _lowercase (self : Dict ):
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True )
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True )
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
def _lowercase (self : Dict ):
        tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def _lowercase (self : Optional[int] ):
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _lowercase (self : Any ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def _lowercase (self : Optional[int] ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _lowercase (self : Optional[int] ):
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _lowercase (self : Optional[Any] ):
        tokenizer = BasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def _lowercase (self : Optional[int] ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def _lowercase (self : Tuple ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def _lowercase (self : Tuple ):
UpperCAmelCase_ = BasicTokenizer(do_lower_case=__a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def _lowercase (self : Any ):
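        # Build a tiny WordPiece vocab and check longest-match-first subword splitting.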
UpperCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
UpperCAmelCase_ = {}
for i, token in enumerate(__a ):
UpperCAmelCase_ = i
UpperCAmelCase_ = WordpieceTokenizer(vocab=__a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def _lowercase (self : Optional[int] ):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def _lowercase (self : str ):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def _lowercase (self : Any ):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def _lowercase (self : Any ):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def _lowercase (self : Dict ):
UpperCAmelCase_ = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" )
UpperCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def _lowercase (self : Dict ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , "do_lower_case" ) else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = ["的", "人", "有"]
UpperCAmelCase_ = "".join(__a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase_ = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
UpperCAmelCase_ = tokenizer_p.encode(__a , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer_r.encode(__a , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer_r.convert_ids_to_tokens(__a )
UpperCAmelCase_ = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
UpperCAmelCase_ = tokenizer_r.encode(__a , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer_p.encode(__a , add_special_tokens=__a )
UpperCAmelCase_ = tokenizer_r.convert_ids_to_tokens(__a )
UpperCAmelCase_ = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCAmelCase_ = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(__a )
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
| 106 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : List[Any] = logging.get_logger(__name__)
_UpperCamelCase : Optional[int] = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCAmelCase_ : Dict ="luke"
def __init__( self , _lowerCamelCase=5_0_2_6_7 , _lowerCamelCase=5_0_0_0_0_0 , _lowerCamelCase=7_6_8 , _lowerCamelCase=2_5_6 , _lowerCamelCase=1_2 , _lowerCamelCase=1_2 , _lowerCamelCase=3_0_7_2 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=5_1_2 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-12 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , **_lowerCamelCase , ):
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase = vocab_size
lowercase = entity_vocab_size
lowercase = hidden_size
lowercase = entity_emb_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = use_entity_aware_attention
lowercase = classifier_dropout
| 220 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance( vector_a: Vector , vector_b: Vector ) -> VectorOut:
"""simple docstring"""
    return np.sqrt(np.sum((np.asarray(vector_a ) - np.asarray(vector_b )) ** 2 ) )
def euclidean_distance_no_np( vector_a: Vector , vector_b: Vector ) -> VectorOut:
"""simple docstring"""
    return sum((va - vb) ** 2 for va, vb in zip(vector_a , vector_b ) ) ** (1 / 2)
if __name__ == "__main__":
    def benchmark() -> None:
"""simple docstring"""
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) )
benchmark()
| 14 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : Union[List[PIL.Image.Image], np.ndarray]
__lowercase : Optional[List[bool]]
__lowercase : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 18 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 18 | 1 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class A_ ( A__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ["""audio_values""", """audio_mask"""]
    def __init__( self , spectrogram_length=2_048 , num_channels=1 , patch_size=[16, 16] , feature_size=128 , sampling_rate=44_100 , hop_length_to_sampling_rate=86 , n_fft=2_048 , padding_value=0.0 , **kwargs , ):
        """simple docstring"""
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=sampling_rate , norm='slaney' , mel_scale='slaney' , ).T
def UpperCAmelCase__ ( self :Dict , lowerCamelCase_ :np.array ):
"""simple docstring"""
        log_spec =spectrogram(
            lowerCamelCase_ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
        log_spec =log_spec[:, :-1]
        log_spec =log_spec - 20.0
        log_spec =np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self :Optional[Any] , lowerCamelCase_ :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase_ :Optional[Union[str, TensorType]] = None , lowerCamelCase_ :Optional[bool] = True , lowerCamelCase_ :Optional[int] = None , lowerCamelCase_ :bool = False , lowerCamelCase_ :bool = False , **lowerCamelCase_ :Tuple , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
f""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowerCamelCase__ : Dict =isinstance(lowerCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowerCamelCase__ : Union[str, Any] =is_batched_numpy or (
isinstance(lowerCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase__ : Optional[Any] =[np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase_ , np.ndarray ):
lowerCamelCase__ : Optional[Any] =np.asarray(lowerCamelCase_ , dtype=np.floataa )
elif isinstance(lowerCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase__ : Union[str, Any] =raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase__ : List[str] =[np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowerCamelCase__ : Any =[
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCamelCase_ ):
lowerCamelCase__ : Dict =[np.asarray(lowerCamelCase_ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowerCamelCase__ : Optional[Any] =max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowerCamelCase__ : Any =[
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowerCamelCase__ : Union[str, Any] =np.array(lowerCamelCase_ ).astype(np.floataa )
# convert into correct format for padding
lowerCamelCase__ : Tuple =max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowerCamelCase__ : str =np.ones([len(lowerCamelCase_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowerCamelCase__ : Dict =padded_audio_features * self.padding_value
for i in range(len(lowerCamelCase_ ) ):
lowerCamelCase__ : Union[str, Any] =audio_features[i]
lowerCamelCase__ : Union[str, Any] =feature
# return as BatchFeature
if return_attention_mask:
lowerCamelCase__ : int ={'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
lowerCamelCase__ : Tuple ={'audio_values': padded_audio_features}
lowerCamelCase__ : Union[str, Any] =BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_ )
        return encoded_inputs
| 126 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {"""tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class A_ ( A__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ = None
def __init__( self :Dict , lowerCamelCase_ :Union[str, Any]=None , lowerCamelCase_ :Any=None , lowerCamelCase_ :int=None , lowerCamelCase_ :List[str]="<unk>" , lowerCamelCase_ :List[Any]="<s>" , lowerCamelCase_ :str="</s>" , lowerCamelCase_ :Union[str, Any]="<pad>" , lowerCamelCase_ :Union[str, Any]=False , lowerCamelCase_ :Dict=False , **lowerCamelCase_ :List[Any] , ):
"""simple docstring"""
super().__init__(
lowerCamelCase_ , lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ , **lowerCamelCase_ , )
        pre_tok_state =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowerCamelCase_ ) != add_prefix_space:
            pre_tok_class =getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
def UpperCAmelCase__ ( self :Optional[int] , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :Optional[Any] ):
"""simple docstring"""
        is_split_into_words =kwargs.get('is_split_into_words' , False )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
' pretokenized inputs.' )
return super()._batch_encode_plus(*lowerCamelCase_ , **lowerCamelCase_ )
def UpperCAmelCase__ ( self :int , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :Any ):
"""simple docstring"""
        is_split_into_words =kwargs.get('is_split_into_words' , False )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
' pretokenized inputs.' )
return super()._encode_plus(*lowerCamelCase_ , **lowerCamelCase_ )
def UpperCAmelCase__ ( self :Dict , lowerCamelCase_ :str , lowerCamelCase_ :Optional[str] = None ):
"""simple docstring"""
        files =self._tokenizer.model.save(lowerCamelCase_ , name=lowerCamelCase_ )
        return tuple(files )
def UpperCAmelCase__ ( self :List[Any] , lowerCamelCase_ :"Conversation" ):
"""simple docstring"""
        input_ids =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) + [self.eos_token_id] )
if len(lowerCamelCase_ ) > self.model_max_length:
            input_ids =input_ids[-self.model_max_length :]
        return input_ids
| 126 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_groupvit'] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_groupvit'] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 258 |
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in `barrier` calls, you have network issues; you can try to debug them with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
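# printflock() serializes prints from concurrent ranks by taking an exclusive lock on this file.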
def printflock( *_lowercase ):
    with open(__file__ , '''r''' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*_lowercase )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank)
hostname = socket.gethostname()
gpu = f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 258 | 1 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Dict, List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def split_text( text , n=100 , character=" " ) -> List[str]:
    """simple docstring"""
    _lowercase =text.split(character )
    return [character.join(_lowercase[i : i + n] ).strip() for i in range(0 , len(_lowercase ) , n )]
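# Turn each (title, text) document row into many (title, passage) rows.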
def split_documents( documents ) -> dict:
    """simple docstring"""
    titles , texts =[], []
    for title, text in zip(documents['''title'''] , documents['''text'''] ):
        if text is not None:
            for passage in split_text(text ):
                titles.append(title if title is not None else '''''' )
                texts.append(passage )
    return {"title": titles, "text": texts}
def embed( documents , ctx_encoder , ctx_tokenizer ) -> dict:
    """simple docstring"""
    input_ids =ctx_tokenizer(
        documents['''title'''] , documents['''text'''] , truncation=True , padding='''longest''' , return_tensors='''pt''' )['''input_ids''']
    embeddings =ctx_encoder(input_ids.to(device=device ) , return_dict=True ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main( rag_example_args , processing_args , index_hnsw_args , ) -> Dict:
"""simple docstring"""
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset =load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
    dataset =dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc )
# And compute the embeddings
    ctx_encoder =DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=device )
    ctx_tokenizer =DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    new_features =Features(
{'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space
    dataset =dataset.map(
        partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer ) , batched=True , batch_size=processing_args.batch_size , features=new_features , )
# And finally save your dataset
    passages_path =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
    dataset.save_to_disk(passages_path )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index =faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index('''embeddings''' , custom_index=index )
# And save the index
    index_path =os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' )
    dataset.get_index('''embeddings''' ).save(index_path )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments :
    csv_path : str = field(
        default=str(Path(__file__ ).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''') , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , )
    question : Optional[str] = field(
        default=None , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , )
    rag_model_name : str = field(
        default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , )
    dpr_ctx_encoder_model_name : str = field(
default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={
'''help''': (
'''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'''
''' \'facebook/dpr-ctx_encoder-multiset-base\''''
)
} , )
    output_dir : Optional[str] = field(
        default=str(Path(__file__ ).parent / '''test_run''' / '''dummy-kb''') , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , )
@dataclass
class ProcessingArguments :
    num_proc : Optional[int] = field(
        default=None , metadata={
'''help''': '''The number of processes to use to split the documents into passages. Default is single process.'''
} , )
    batch_size : int = field(
default=16 , metadata={
'''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.'''
} , )
@dataclass
class IndexHnswArguments :
    d : int = field(
default=768 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , )
    m : int = field(
default=128 , metadata={
'''help''': (
'''The number of bi-directional links created for every new element during the HNSW index construction.'''
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args , processing_args , index_hnsw_args = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 5 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve( matrix: Matrix , vector: Matrix ) -> Matrix:
"""simple docstring"""
    size =len(matrix )
    augmented =[[0 for _ in range(size + 1 )] for _ in range(size )]
    row: int
    rowa: int
    col: int
    cola: int
    pivot_row: int
    ratio: float
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
while row < size and col < size:
# pivoting
        pivot_row =max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[
            1
        ]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
            augmented[pivot_row], augmented[row] =augmented[row], augmented[pivot_row]
        for rowa in range(row + 1 , size ):
            ratio =augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio =augmented[row][col] / augmented[col][col]
            for cola in range(col , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
]
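# interpolate() fits an exact degree-(k-1) polynomial through (1, y_1), ..., (k, y_k) via the solver above.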
def interpolate( y_points: list[int] ) -> Callable[[int], int]:
"""simple docstring"""
    size =len(y_points )
    matrix =[[0 for _ in range(size )] for _ in range(size )]
    vector =[[0] for _ in range(size )]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_points ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs =solve(matrix , vector )
def interpolated_func(__snake_case ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__snake_case ) )
return interpolated_func
def question_function(variable: int) -> int:
    """The generating function u(n) from Project Euler problem 101."""
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the optimal polynomials OP(k, n)."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
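    # Hedged sanity check (not part of the source): fitting the first three cubes
    # gives OP(3, n) = 6*n**2 - 11*n + 6, whose first incorrect term is 58 at n = 4.
    assert interpolate([n**3 for n in range(1, 4)])(4) == 58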
| 5 | 1 |
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph with the given edge probability."""
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is smaller than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add an edge from j to i
                    graph[j].append(i)
    return graph
def complete_graph(vertices_number: int) -> dict:
    """Generate a complete (fully connected) graph on ``vertices_number`` nodes."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
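# Quick check (an assumption, not part of the source):
#   complete_graph(3)  ==  {0: [1, 2], 1: [0, 2], 2: [0, 1]}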
if __name__ == "__main__":
import doctest
doctest.testmod() | 151 |
g = 9.80665
def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
'''simple docstring'''
if fluid_density <= 0:
raise ValueError("Impossible fluid density")
if volume < 0:
raise ValueError("Impossible Object volume")
if gravity <= 0:
raise ValueError("Impossible Gravity")
return fluid_density * gravity * volume
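# Worked check (an assumption, not part of the source): 0.5 m^3 submerged in
# water (density 1000 kg/m^3) displaces 1000 * 9.80665 * 0.5 = 4903.325 N.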
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod() | 151 | 1 |
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
lowercase : Dict = "__DUMMY_TRANSFORMERS_USER__"
lowercase : Union[str, Any] = "Dummy User"
lowercase : List[Any] = "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"
lowercase : List[str] = "https://hub-ci.huggingface.co"
lowercase : Optional[Any] = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
lowercase : str = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
lowercase : str = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( __A ) -> Any:
monkeypatch.setattr(
'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , __A )
@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( __A ) -> int:
monkeypatch.setattr('datasets.config.HF_ENDPOINT' , __A )
monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , __A )
@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( __A ) -> Tuple:
monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , __A )
@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> Tuple:
HfFolder.save_token(__A )
yield
HfFolder.delete_token()
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
return HfApi(endpoint=__A )
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( __A ) -> int:
_snake_case = HfFolder.get_token()
HfFolder.save_token(__A )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(__A )
@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( __A ) -> Dict:
def _cleanup_repo(__A ):
hf_api.delete_repo(__A , token=__A , repo_type='dataset' )
return _cleanup_repo
@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( __A ) -> List[str]:
@contextmanager
def _temporary_repo(__A ):
try:
yield repo_id
finally:
cleanup_repo(__A )
return _temporary_repo
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> Any:
_snake_case = F'repo_txt_data-{int(time.time() * 10e3 )}'
_snake_case = F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(__A , token=__A , repo_type='dataset' , private=__A )
hf_api.upload_file(
token=__A , path_or_fileobj=str(__A ) , path_in_repo='data/text_data.txt' , repo_id=__A , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(__A , token=__A , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> Union[str, Any]:
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> str:
_snake_case = F'repo_zipped_txt_data-{int(time.time() * 10e3 )}'
_snake_case = F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(__A , token=__A , repo_type='dataset' , private=__A )
hf_api.upload_file(
token=__A , path_or_fileobj=str(__A ) , path_in_repo='data.zip' , repo_id=__A , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(__A , token=__A , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> Tuple:
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='session' )
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> str:
_snake_case = F'repo_zipped_img_data-{int(time.time() * 10e3 )}'
_snake_case = F'{CI_HUB_USER}/{repo_name}'
hf_api.create_repo(__A , token=__A , repo_type='dataset' , private=__A )
hf_api.upload_file(
token=__A , path_or_fileobj=str(__A ) , path_in_repo='data.zip' , repo_id=__A , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(__A , token=__A , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> Optional[int]:
return hf_private_dataset_repo_zipped_img_data_
| 42 |
'''simple docstring'''
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])
IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
| 250 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"
    def __init__(self, vocab_size=50_000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1_536, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
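# Minimal usage sketch (an assumption, not part of the source):
#   config = RoFormerConfig(num_hidden_layers=6)
#   assert config.model_type == "roformer"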
| 125 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False
    def __init__(self, dataset_name: str, config: str, version: Union[Version, str], cache_dir: Optional[str] = None, use_local_dummy_data: bool = False, load_existing_dummy_data: bool = True, download_callbacks: Optional[List[Callable]] = None):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file
    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)
    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")
    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)
    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)
    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url
    @property
    def manual_dir(self):
        # return full path if it is a directory
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)
    def download(self, data_url, *args):
        return self.download_and_extract(data_url)
    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)
    def extract(self, path, *args, **kwargs):
        return path
    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files(self):
        pass
    def manage_extracted_files(self):
        pass
    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)
        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")
    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
| 125 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
A_ : Any = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def get_new_h_w(h, w, scale_factor=8):
    """Map a pixel height/width to the corresponding latent height/width."""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
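# Worked check (an assumption, not part of the source): for a 768x768 image and
# the default scale factor of 8, get_new_h_w(768, 768, 8) == (96, 96).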
class KandinskyPipeline(DiffusionPipeline):
    """Text-to-image pipeline built from the components registered in ``__init__``."""
    def __init__(self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", truncation=True, max_length=77, return_attention_mask=True, add_special_tokens=True, return_tensors="pt"
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)
        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=77, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors="pt"
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)
            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])
        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=False)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, prompt, image_embeds, negative_image_embeds, negative_prompt=None, height=512, width=512, num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = get_new_h_w(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), text_encoder_hidden_states.dtype, device, generator, latents, self.scheduler
        )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=text_encoder_hidden_states, added_cond_kwargs=added_cond_kwargs, return_dict=False
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator
            ).prev_sample
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 215 |
'''simple docstring'''
class Graph:
    """An undirected, weighted graph stored as an adjacency mapping."""
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight(self):
        """Make all edge weights distinct while preserving their relative order."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")
    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output
    def get_vertices(self):
        return self.adjacency.keys()
    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
class UnionFind:
    """Disjoint-set structure with union by rank and path compression."""
    def __init__(self):
        self.parent = {}
        self.rank = {}
    def __len__(self):
        return len(self.parent)
    def make_set(self, item):
        if item in self.parent:
            return self.find(item)
        self.parent[item] = item
        self.rank[item] = 0
        return item
    def find(self, item):
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])
        return self.parent[item]
    def union(self, item1, item2):
        root1 = self.find(item1)
        root2 = self.find(item2)
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2
        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1
        return None
    @staticmethod
    def boruvka_mst(graph):
        """Compute a minimum spanning tree of ``graph`` with Boruvka's algorithm."""
        num_components = graph.num_vertices
        union_find = UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
| 215 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 355 |
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)
    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, "dwtDct") for image in images]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
return images | 303 | 0 |
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """Build one row of the triangle from the row above it."""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row
def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int) -> None:
    """Each interior element is the sum of the two elements above it."""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle exploiting the symmetry of each row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
def benchmark() -> None:
    """Benchmark the two triangle generators against each other."""
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")
    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
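    # Hedged sanity check (not part of the source): the fifth row of the triangle.
    assert generate_pascal_triangle(5)[-1] == [1, 4, 6, 4, 1]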
benchmark()
| 18 | from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start`` and record even cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret
def even_tree():
    """Count removable edges that split the tree into even-sized components."""
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
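    # For the sample tree above this prints 2: removing the edges (1, 3) and
    # (1, 6) leaves three components with an even number of nodes each.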
| 18 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"
    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)
    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 366 |
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    """Sort the first ``n`` elements of ``collection`` in place, recursively."""
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)
def insert_next(collection: list, index: int):
    """Swap ``collection[index]`` leftwards until the prefix is ordered."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
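# Worked example (an assumption, not part of the source):
#   data = [5, 2, 4]
#   rec_insertion_sort(data, len(data))
#   assert data == [2, 4, 5]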
if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 100 | 0 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , UpperCAmelCase )
A = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
A = dataset_size < in_memory_max_size
else:
A = False
A = is_small_dataset(UpperCAmelCase )
assert result == expected
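# Note (an assumption, not part of the source): the two parametrize decorators
# above expand to 3 * 4 = 12 independent test cases under pytest.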
| 258 |
'''simple docstring'''
def or_gate(input_1: int, input_2: int) -> int:
    """Logical OR on two binary inputs."""
    return int((input_1, input_2).count(1) != 0)
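# Truth table, matching the assertions below:
#   OR(0, 0) = 0, OR(0, 1) = 1, OR(1, 0) = 1, OR(1, 1) = 1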
def test_or_gate() -> None:
    """Exhaustively check the two-input truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 258 | 1 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__A : Any = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
__A : Dict = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
__A : int = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    """Corpus-level BLEU: modified n-gram precision combined with a brevity penalty."""

    def _info(self) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
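# --- A minimal usage sketch (illustrative, not part of the metric implementation) ---
# Tokenization below is naive whitespace splitting, chosen only for the example.
# Note that without smoothing, a hypothesis sharing no 4-gram with its reference
# scores exactly 0, which is why `smooth=True` is passed here.
if __name__ == "__main__":
    metric = datasets.load_metric("bleu")
    predictions = ["the cat sat on the mat".split()]
    references = [["the cat is on the mat".split()]]
    results = metric.compute(predictions=predictions, references=references, smooth=True)
    print(results["bleu"])  # a score strictly between 0 and 1 for this partial match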
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
"BigBirdPegasusOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
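# Illustration (hypothetical usage, not part of this module): the lazy structure above
# keeps `import transformers` cheap. _LazyModule only imports a submodule on first
# attribute access, so the configuration can be loaded without pulling in torch:
#
#   from transformers import BigBirdPegasusConfig  # resolves the configuration module only
#   from transformers import BigBirdPegasusModel   # first access imports the torch-backed modeling file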
| 89 | 1 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
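# A hedged usage sketch (the checkpoint name "openbmb/cpm-ant-10b" follows the
# transformers documentation; adjust if your checkpoint differs):
#
#   from transformers import CpmAntTokenizer, CpmAntForCausalLM
#   tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#   model = CpmAntForCausalLM.from_pretrained("openbmb/cpm-ant-10b")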
| 181 |
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Create the vector that points from end_point1 to end_point2."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Compute the cross product ab x ac."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """Check whether every component rounds to zero at the given decimal accuracy."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear iff the cross product of AB and AC is the zero vector."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
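# A small self-check (the example points are assumptions chosen for illustration):
# (0,0,0), (1,1,1), (2,2,2) lie on the line x = y = z, while (1,0,0) does not.
if __name__ == "__main__":
    assert are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))  # collinear -> True
    assert not are_collinear((0, 0, 0), (1, 1, 1), (1, 0, 0))  # off the line -> False
    print("collinearity checks passed")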
| 127 | 0 |
def pancake_sort(arr):
    """Sort a list using pancake sort: repeatedly flip the largest unsorted
    element to the front, then flip it into its final position."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in the unsorted prefix arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix, moving the maximum into its final place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
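# Non-interactive example (the sample data is an assumption for illustration):
#   >>> pancake_sort([3, 1, 4, 1, 5, 9, 2, 6])
#   [1, 1, 2, 3, 4, 5, 6, 9]
# Each pass performs at most two list reversals ("flips"), so sorting n items
# needs at most 2 * (n - 1) flips.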
| 75 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a dataset path, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if the filesystem is anything other than the local filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    """Rename a path, using shutil.move locally and fs.mv on remote filesystems."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear fsspec's event-loop references so forked processes do not inherit a stale lock."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
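# A minimal usage sketch (the example paths are assumptions chosen for illustration):
if __name__ == "__main__":
    assert extract_path_from_uri("s3://my-bucket/data/train") == "my-bucket/data/train"
    assert extract_path_from_uri("relative/path") == "relative/path"  # no protocol, returned as-is
    print("path extraction checks passed")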
| 75 | 1 |