"""simple docstring"""
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class A_ :
def __init__( self: Optional[Any] ,__lowerCAmelCase: Any ,__lowerCAmelCase: List[Any]=13 ,__lowerCAmelCase: Tuple=7 ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: Optional[Any]=99 ,__lowerCAmelCase: Optional[int]=32 ,__lowerCAmelCase: Dict=5 ,__lowerCAmelCase: Any=4 ,__lowerCAmelCase: int=37 ,__lowerCAmelCase: Tuple="gelu" ,__lowerCAmelCase: Optional[int]=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Union[str, Any]=512 ,__lowerCAmelCase: Optional[int]=16 ,__lowerCAmelCase: List[Any]=2 ,__lowerCAmelCase: Optional[Any]=0.02 ,__lowerCAmelCase: int=3 ,__lowerCAmelCase: Tuple=4 ,__lowerCAmelCase: List[str]=None ,):
'''simple docstring'''
_lowerCamelCase : int = parent
_lowerCamelCase : Union[str, Any] = batch_size
_lowerCamelCase : Optional[Any] = seq_length
_lowerCamelCase : Union[str, Any] = is_training
_lowerCamelCase : Any = use_input_mask
_lowerCamelCase : Tuple = use_token_type_ids
_lowerCamelCase : Dict = use_labels
_lowerCamelCase : Any = vocab_size
_lowerCamelCase : int = hidden_size
_lowerCamelCase : Optional[Any] = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : List[str] = intermediate_size
_lowerCamelCase : str = hidden_act
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[Any] = max_position_embeddings
_lowerCamelCase : Optional[int] = type_vocab_size
_lowerCamelCase : Dict = type_sequence_label_size
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : int = num_labels
_lowerCamelCase : List[str] = num_choices
_lowerCamelCase : Any = scope
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowerCamelCase : int = None
if self.use_input_mask:
_lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Optional[int] = None
if self.use_token_type_ids:
_lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_lowerCamelCase : List[Any] = None
_lowerCamelCase : int = None
_lowerCamelCase : List[str] = None
if self.use_labels:
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_lowerCamelCase : Dict = ids_tensor([self.batch_size] ,self.num_choices )
_lowerCamelCase : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self: Any ):
'''simple docstring'''
return NystromformerConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,)
def _lowercase ( self: int ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: str ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Dict = NystromformerModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Optional[int] = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase ,token_type_ids=__lowerCAmelCase )
_lowerCamelCase : Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: Dict ,__lowerCAmelCase: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = NystromformerForMaskedLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : List[Any] = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: str ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = NystromformerForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Any = model(
__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,start_positions=__lowerCAmelCase ,end_positions=__lowerCAmelCase ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ,__lowerCAmelCase: Any ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.num_labels
_lowerCamelCase : int = NystromformerForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : int = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _lowercase ( self: str ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: str ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.num_labels
_lowerCamelCase : int = NystromformerForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Tuple = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self: str ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.num_choices
_lowerCamelCase : List[Any] = NystromformerForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : int = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowerCamelCase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowerCamelCase : Any = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowerCamelCase : Optional[Any] = model(
__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,labels=__lowerCAmelCase ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
(
(
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
),
) : str = config_and_inputs
_lowerCamelCase : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': NystromformerModel,
'fill-mask': NystromformerForMaskedLM,
'question-answering': NystromformerForQuestionAnswering,
'text-classification': NystromformerForSequenceClassification,
'token-classification': NystromformerForTokenClassification,
'zero-shot': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = NystromformerModelTester(self )
_lowerCamelCase : Optional[Any] = ConfigTester(self ,config_class=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def _lowercase ( self: List[Any] ):
'''simple docstring'''
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : str = NystromformerModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_torch
class A_ ( unittest.TestCase ):
@slow
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : str = NystromformerModel.from_pretrained("uw-madison/nystromformer-512" )
_lowerCamelCase : Optional[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
_lowerCamelCase : List[str] = model(__lowerCAmelCase )[0]
_lowerCamelCase : Dict = torch.Size((1, 6, 768) )
self.assertEqual(output.shape ,__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = torch.tensor(
[[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__lowerCAmelCase ,atol=1e-4 ) )
@slow
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[Any] = "the [MASK] of Belgium is Brussels"
_lowerCamelCase : str = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512" )
_lowerCamelCase : List[Any] = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512" )
_lowerCamelCase : Optional[int] = tokenizer(__lowerCAmelCase ,return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : Dict = model(encoding.input_ids ).logits
_lowerCamelCase : Tuple = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) ,"capital" ) | 46 |
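
# A hedged illustration (not part of the original test file): the end-to-end
# masked-LM check above can also be reproduced with the fill-mask pipeline,
# assuming the "uw-madison/nystromformer-512" checkpoint is reachable:
#
#     from transformers import pipeline
#
#     unmasker = pipeline("fill-mask", model="uw-madison/nystromformer-512")
#     print(unmasker("the [MASK] of Belgium is Brussels")[0]["token_str"])  # expected: "capital"


# =============================================================================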
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
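
# A hedged usage sketch (illustrative addition, not part of the module):
# library code can call `dep_version_check` to validate a single pinned
# dependency on demand, forwarding an optional install hint to `require_version`:
#
#     from .dependency_versions_check import dep_version_check
#
#     dep_version_check("tokenizers", hint="pip install -U tokenizers")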

# =============================================================================

"""simple docstring"""
def __lowerCAmelCase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ) -> str:
# Return True if there is node that has not iterated.
_UpperCamelCase : Any = [False] * len(__lowerCAmelCase )
_UpperCamelCase : List[Any] = []
queue.append(__lowerCAmelCase )
_UpperCamelCase : List[str] = True
while queue:
_UpperCamelCase : Union[str, Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__lowerCAmelCase )
_UpperCamelCase : Dict = True
_UpperCamelCase : Dict = u
return visited[t]
def __lowerCAmelCase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ) -> List[str]:
# This array is filled by BFS and to store path
_UpperCamelCase : Tuple = [-1] * (len(__lowerCAmelCase ))
_UpperCamelCase : List[Any] = 0
while bfs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCamelCase : Union[str, Any] = float("Inf" )
_UpperCamelCase : Union[str, Any] = sink
while s != source:
# Find the minimum value in select path
_UpperCamelCase : List[str] = min(__lowerCAmelCase , graph[parent[s]][s] )
_UpperCamelCase : List[str] = parent[s]
max_flow += path_flow
_UpperCamelCase : Any = sink
while v != source:
_UpperCamelCase : Optional[int] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_UpperCamelCase : Optional[int] = parent[v]
return max_flow
_SCREAMING_SNAKE_CASE = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0, 5
print(ford_fulkerson(graph, source, sink))
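
# A hedged sanity check (illustrative addition, not in the original): the
# adjacency matrix above is the classic CLRS flow network, whose maximum flow
# is known to be 23. `ford_fulkerson` mutates its graph argument in place, so
# the matrix is rebuilt rather than reused.
_check_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(_check_graph, 0, 5) == 23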

# =============================================================================

"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
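
# A hedged illustration (not part of the original __init__): with the lazy
# module in place, importing a symbol only triggers the import of the
# submodule that defines it, so config-only consumers never pay for the
# torch-backed modeling code:
#
#     from transformers.models.reformer import ReformerConfig   # lightweight
#     # from transformers.models.reformer import ReformerModel  # pulls in torch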

# =============================================================================

"""simple docstring"""
import random
from typing import Any
def snake_case ( lowerCAmelCase_ ) -> list[Any]:
for _ in range(len(lowerCAmelCase_ ) ):
_snake_case = random.randint(0 , len(lowerCAmelCase_ ) - 1 )
_snake_case = random.randint(0 , len(lowerCAmelCase_ ) - 1 )
_snake_case , _snake_case = data[b], data[a]
return data
if __name__ == "__main__":
snake_case = [0, 1, 2, 3, 4, 5, 6, 7]
snake_case = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
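    # A hedged reproducibility note (illustrative addition): seeding the module
    # RNG makes the shuffle deterministic, which is useful in tests.
    random.seed(0)
    print("Seeded shuffle", fisher_yates_shuffle([0, 1, 2, 3, 4, 5, 6, 7]))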

# =============================================================================

import unittest

from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST


class DebertaV2ModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")


# =============================================================================
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int) -> None:
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list: list[int] = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
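    # A hedged worked example (illustrative addition): the recursion sorts the
    # first `n` elements, so a smaller `n` yields a partial sort. Note that the
    # recursion depth grows with the input size, so very long lists can exceed
    # Python's default recursion limit.
    demo = [5, 3, 1, 4, 2]
    rec_insertion_sort(demo, len(demo))
    assert demo == [1, 2, 3, 4, 5]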

# =============================================================================

def solution(n: int = 4000000) -> int:
    """Return the sum of the even-valued Fibonacci terms not exceeding n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
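    # A hedged cross-check (illustrative addition): every third Fibonacci
    # number is even, and the even ones obey E(k) = 4 * E(k-1) + E(k-2)
    # (2, 8, 34, 144, ...), so the sum can be computed without storing the
    # whole sequence.
    def even_fib_sum(n: int = 4000000) -> int:
        a, b = 2, 8  # the first two even Fibonacci numbers
        total = 0
        while a <= n:
            total += a
            a, b = b, 4 * b + a
        return total

    assert even_fib_sum() == solution()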

# =============================================================================

from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
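
# A hedged offline check (illustrative addition): the XPath only depends on the
# page markup, so the extraction can be exercised against a canned snippet
# without any network traffic. The numbers below are made up.
_sample = (
    '<div><div class="maincounter-number"><span>1</span></div>'
    '<div class="maincounter-number"><span>2</span></div>'
    '<div class="maincounter-number"><span>3</span></div></div>'
)
assert html.fromstring(_sample).xpath('//div[@class = "maincounter-number"]/span/text()') == ["1", "2", "3"]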

# =============================================================================

import unittest

from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM


@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass

# =============================================================================

# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)

# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
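
# A hedged consumption sketch (illustrative addition), mirroring how the tiny
# checkpoint is meant to be used in tests once uploaded as
# "stas/tiny-wmt19-en-de":
#
#     tok = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de")
#     model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")
#     out = model.generate(**tok(["Hello"], return_tensors="pt"), max_length=8)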

# =============================================================================

import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__lowerCamelCase = logging.getLogger(__name__)
class a__ ( lowerCAmelCase_ ):
lowerCamelCase__: str = """summarization"""
lowerCamelCase__: List[str] = ["""loss"""]
lowerCamelCase__: List[str] = ROUGE_KEYS
lowerCamelCase__: Union[str, Any] = """rouge2"""
def __init__( self : Union[str, Any] , lowerCamelCase_ : Tuple , **lowerCamelCase_ : List[str] ):
if hparams.sortish_sampler and hparams.gpus > 1:
a_ : Any = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(lowerCamelCase_ , num_labels=lowerCamelCase_ , mode=self.mode , **lowerCamelCase_ )
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
a_ : Any = Path(self.output_dir ) / """metrics.json"""
a_ : Tuple = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams , self.hparams_save_path )
a_ : Dict = 0
a_ : Optional[Any] = defaultdict(lowerCamelCase_ )
a_ : str = self.config.model_type
a_ : Union[str, Any] = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
a_ : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
a_ : int = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
a_ : List[str] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
a_ : Optional[Any] = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
a_ : int = get_git_info()["""repo_sha"""]
a_ : Union[str, Any] = hparams.num_workers
a_ : Tuple = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowerCamelCase_ ):
a_ : Dict = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
a_ : List[str] = self.decoder_start_token_id
a_ : Union[str, Any] = (
SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
a_ : Union[str, Any] = False
a_ : Any = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
a_ : Union[str, Any] = self.hparams.eval_max_gen_length
else:
a_ : Optional[Any] = self.model.config.max_length
a_ : Optional[int] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def UpperCAmelCase( self : List[Any] , lowerCamelCase_ : Dict[str, torch.Tensor] ):
a_ : Optional[Any] = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(lowerCamelCase_ , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
a_ : List[Any] = True
return readable_batch
def UpperCAmelCase( self : Union[str, Any] , lowerCamelCase_ : str , **lowerCamelCase_ : Tuple ):
return self.model(lowerCamelCase_ , **lowerCamelCase_ )
def UpperCAmelCase( self : Tuple , lowerCamelCase_ : List[int] ):
a_ : str = self.tokenizer.batch_decode(
lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ )
return lmap(str.strip , lowerCamelCase_ )
def UpperCAmelCase( self : Optional[int] , lowerCamelCase_ : dict ):
a_ : int = self.tokenizer.pad_token_id
a_ , a_ : Optional[int] = batch["""input_ids"""], batch["""attention_mask"""]
a_ : Optional[Any] = batch["""labels"""]
if isinstance(self.model , lowerCamelCase_ ):
a_ : List[Any] = self.model._shift_right(lowerCamelCase_ )
else:
a_ : Dict = shift_tokens_right(lowerCamelCase_ , lowerCamelCase_ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
a_ : int = decoder_input_ids
self.save_readable_batch(lowerCamelCase_ )
a_ : str = self(lowerCamelCase_ , attention_mask=lowerCamelCase_ , decoder_input_ids=lowerCamelCase_ , use_cache=lowerCamelCase_ )
a_ : int = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
a_ : Any = nn.CrossEntropyLoss(ignore_index=lowerCamelCase_ )
assert lm_logits.shape[-1] == self.vocab_size
a_ : Union[str, Any] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
a_ : int = nn.functional.log_softmax(lowerCamelCase_ , dim=-1 )
a_ , a_ : str = label_smoothed_nll_loss(
lowerCamelCase_ , lowerCamelCase_ , self.hparams.label_smoothing , ignore_index=lowerCamelCase_ )
return (loss,)
@property
def UpperCAmelCase( self : Union[str, Any] ):
return self.tokenizer.pad_token_id
def UpperCAmelCase( self : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] ):
a_ : Dict = self._step(lowerCamelCase_ )
a_ : Optional[int] = dict(zip(self.loss_names , lowerCamelCase_ ) )
# tokens per batch
a_ : str = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
a_ : Optional[int] = batch["""input_ids"""].shape[0]
a_ : int = batch["""input_ids"""].eq(self.pad ).sum()
a_ : str = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def UpperCAmelCase( self : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple ):
return self._generative_step(lowerCamelCase_ )
def UpperCAmelCase( self : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int]="val" ):
self.step_count += 1
a_ : Optional[Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
a_ : Tuple = losses["""loss"""]
a_ : Optional[int] = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
a_ : List[Any] = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
a_ : torch.FloatTensor = torch.tensor(lowerCamelCase_ ).type_as(lowerCamelCase_ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowerCamelCase_ )
a_ : str = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
a_ : Union[str, Any] = self.step_count
self.metrics[prefix].append(lowerCamelCase_ ) # callback writes this to self.metrics_save_path
a_ : List[str] = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def UpperCAmelCase( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ):
return calculate_rouge(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase( self : int , lowerCamelCase_ : dict ):
a_ : Dict = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
a_ : Any = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=lowerCamelCase_ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
a_ : Optional[int] = (time.time() - ta) / batch["""input_ids"""].shape[0]
a_ : List[str] = self.ids_to_clean_text(lowerCamelCase_ )
a_ : List[str] = self.ids_to_clean_text(batch["""labels"""] )
a_ : Tuple = self._step(lowerCamelCase_ )
a_ : List[str] = dict(zip(self.loss_names , lowerCamelCase_ ) )
a_ : Dict = self.calc_generative_metrics(lowerCamelCase_ , lowerCamelCase_ )
a_ : Optional[int] = np.mean(lmap(lowerCamelCase_ , lowerCamelCase_ ) )
base_metrics.update(gen_time=lowerCamelCase_ , gen_len=lowerCamelCase_ , preds=lowerCamelCase_ , target=lowerCamelCase_ , **lowerCamelCase_ )
return base_metrics
def UpperCAmelCase( self : int , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] ):
return self._generative_step(lowerCamelCase_ )
def UpperCAmelCase( self : str , lowerCamelCase_ : Any ):
return self.validation_epoch_end(lowerCamelCase_ , prefix="""test""" )
def UpperCAmelCase( self : Any , lowerCamelCase_ : Any ):
a_ : List[str] = self.n_obs[type_path]
a_ : Dict = self.target_lens[type_path]
a_ : Optional[Any] = self.dataset_class(
self.tokenizer , type_path=lowerCamelCase_ , n_obs=lowerCamelCase_ , max_target_length=lowerCamelCase_ , **self.dataset_kwargs , )
return dataset
def UpperCAmelCase( self : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : bool = False ):
a_ : List[str] = self.get_dataset(lowerCamelCase_ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
a_ : List[str] = dataset.make_sortish_sampler(lowerCamelCase_ , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=dataset.collate_fn , shuffle=lowerCamelCase_ , num_workers=self.num_workers , sampler=lowerCamelCase_ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
a_ : int = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCamelCase_ , batch_sampler=lowerCamelCase_ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=dataset.collate_fn , shuffle=lowerCamelCase_ , num_workers=self.num_workers , sampler=lowerCamelCase_ , )
def UpperCAmelCase( self : Any ):
a_ : int = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=lowerCamelCase_ )
return dataloader
def UpperCAmelCase( self : Dict ):
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def UpperCAmelCase( self : List[Any] ):
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def UpperCAmelCase( lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] ):
BaseTransformer.add_model_specific_args(lowerCamelCase_ , lowerCamelCase_ )
add_generic_args(lowerCamelCase_ , lowerCamelCase_ )
parser.add_argument(
"""--max_source_length""" , default=1_0_2_4 , type=lowerCamelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=5_6 , type=lowerCamelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=1_4_2 , type=lowerCamelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=1_4_2 , type=lowerCamelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=lowerCamelCase_ )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=lowerCamelCase_ )
parser.add_argument("""--max_tokens_per_batch""" , type=lowerCamelCase_ , default=lowerCamelCase_ )
parser.add_argument("""--logger_name""" , type=lowerCamelCase_ , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=lowerCamelCase_ , default=-1 , required=lowerCamelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=lowerCamelCase_ , default=5_0_0 , required=lowerCamelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=lowerCamelCase_ , default=-1 , required=lowerCamelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=lowerCamelCase_ , default="""summarization""" , required=lowerCamelCase_ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=lowerCamelCase_ , default=0.0 , required=lowerCamelCase_ )
parser.add_argument("""--src_lang""" , type=lowerCamelCase_ , default="""""" , required=lowerCamelCase_ )
parser.add_argument("""--tgt_lang""" , type=lowerCamelCase_ , default="""""" , required=lowerCamelCase_ )
parser.add_argument("""--eval_beams""" , type=lowerCamelCase_ , default=lowerCamelCase_ , required=lowerCamelCase_ )
parser.add_argument(
"""--val_metric""" , type=lowerCamelCase_ , default=lowerCamelCase_ , required=lowerCamelCase_ , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=lowerCamelCase_ , default=lowerCamelCase_ , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=lowerCamelCase_ , default=1 , required=lowerCamelCase_ , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=lowerCamelCase_ , default=-1 , required=lowerCamelCase_ , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class a__ ( lowerCAmelCase_ ):
lowerCamelCase__: List[Any] = """translation"""
lowerCamelCase__: int = ["""loss"""]
lowerCamelCase__: List[str] = ["""bleu"""]
lowerCamelCase__: Optional[Any] = """bleu"""
def __init__( self : Optional[Any] , lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Tuple ):
super().__init__(lowerCamelCase_ , **lowerCamelCase_ )
a_ : Union[str, Any] = hparams.src_lang
a_ : Optional[int] = hparams.tgt_lang
def UpperCAmelCase( self : Optional[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : int ):
return calculate_bleu(lowerCamelCase_ , lowerCamelCase_ )
def _a ( __UpperCamelCase , __UpperCamelCase=None ):
Path(args.output_dir ).mkdir(exist_ok=__UpperCamelCase )
check_output_dir(__UpperCamelCase , expected_items=3 )
if model is None:
if "summarization" in args.task:
a_ : SummarizationModule = SummarizationModule(__UpperCamelCase )
else:
a_ : SummarizationModule = TranslationModule(__UpperCamelCase )
a_ : Dict = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("""/tmp""" )
or str(args.output_dir ).startswith("""/var""" )
):
a_ : Union[str, Any] = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
a_ : Optional[int] = os.environ.get("""WANDB_PROJECT""" , __UpperCamelCase )
a_ : str = WandbLogger(name=model.output_dir.name , project=__UpperCamelCase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
a_ : Optional[int] = WandbLogger(name=model.output_dir.name , project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
a_ : Any = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
a_ : str = False
a_ : Tuple = args.val_metric == """loss"""
a_ : pl.Trainer = generic_train(
__UpperCamelCase , __UpperCamelCase , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , __UpperCamelCase ) , early_stopping_callback=__UpperCamelCase , logger=__UpperCamelCase , )
pickle_save(model.hparams , model.output_dir / """hparams.pkl""" )
if not args.do_predict:
return model
a_ : Union[str, Any] = """"""
a_ : int = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=__UpperCamelCase ) )
if checkpoints:
a_ : Tuple = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
__lowerCamelCase = pl.Trainer.add_argparse_args(parser)
__lowerCamelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__lowerCamelCase = parser.parse_args()
main(args)
| 478 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
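# mapping from fairseq parameter names to Hugging Face Wav2Vec2 parameter names;
# "*" is a per-layer placeholder that is filled in with the encoder layer index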
__snake_case = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
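# parameters that live at the top level of the converted model rather than under the "wav2vec2." prefix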
__snake_case = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def _A ( _lowercase ) -> Optional[Any]:
"""simple docstring"""
__UpperCamelCase = {}
with open(_lowercase , 'r' ) as file:
for line_number, line in enumerate(_lowercase ):
__UpperCamelCase = line.strip()
if line:
__UpperCamelCase = line.split()
__UpperCamelCase = line_number
__UpperCamelCase = words[0]
__UpperCamelCase = value
return result
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[str]:
"""simple docstring"""
for attribute in key.split('.' ):
__UpperCamelCase = getattr(_lowercase , _lowercase )
__UpperCamelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowercase ):
__UpperCamelCase = PARAM_MAPPING[full_name.split('.' )[-1]]
__UpperCamelCase = 'param'
if weight_type is not None and weight_type != "param":
__UpperCamelCase = getattr(_lowercase , _lowercase ).shape
elif weight_type is not None and weight_type == "param":
__UpperCamelCase = hf_pointer
for attribute in hf_param_name.split('.' ):
__UpperCamelCase = getattr(_lowercase , _lowercase )
__UpperCamelCase = shape_pointer.shape
# let's reduce dimension
__UpperCamelCase = value[0]
else:
__UpperCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
__UpperCamelCase = value
elif weight_type == "weight_g":
__UpperCamelCase = value
elif weight_type == "weight_v":
__UpperCamelCase = value
elif weight_type == "bias":
__UpperCamelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
__UpperCamelCase = getattr(_lowercase , _lowercase )
__UpperCamelCase = value
else:
__UpperCamelCase = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowercase ):
__UpperCamelCase = PARAM_MAPPING[full_name.split('.' )[-1]]
__UpperCamelCase = 'param'
if weight_type is not None and weight_type != "param":
__UpperCamelCase = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__UpperCamelCase = '.'.join([key, hf_param_name] )
else:
__UpperCamelCase = key
__UpperCamelCase = value if 'lm_head' in full_key else value[0]
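# mapping from fairseq adapter parameter names to their Hugging Face counterparts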
__snake_case = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
def _A ( _lowercase , _lowercase , _lowercase=None , _lowercase=None ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase = False
for key, mapped_key in MAPPING.items():
__UpperCamelCase = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__UpperCamelCase = True
if "*" in mapped_key:
__UpperCamelCase = name.split(_lowercase )[0].split('.' )[-2]
__UpperCamelCase = mapped_key.replace('*' , _lowercase )
if "weight_g" in name:
__UpperCamelCase = 'weight_g'
elif "weight_v" in name:
__UpperCamelCase = 'weight_v'
elif "bias" in name:
__UpperCamelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__UpperCamelCase = 'weight'
else:
__UpperCamelCase = None
if hf_dict is not None:
rename_dict(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
else:
set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
return is_used
return is_used
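# walk the full fairseq state dict: conv feature-extractor tensors are handled by
# load_conv_layer below, everything else is routed through the MAPPING table above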
def _A ( _lowercase , _lowercase , _lowercase ) -> Dict:
"""simple docstring"""
__UpperCamelCase = []
__UpperCamelCase = fairseq_model.state_dict()
__UpperCamelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
_lowercase , _lowercase , _lowercase , _lowercase , hf_model.config.feat_extract_norm == 'group' , )
__UpperCamelCase = True
else:
__UpperCamelCase = load_wavaveca_layer(_lowercase , _lowercase , _lowercase )
if not is_used:
unused_weights.append(_lowercase )
logger.warning(f'''Unused weights: {unused_weights}''' )
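# conv layer names encode a layer id and a type id (conv weight/bias vs. layer norm),
# which are parsed out of the fairseq name below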
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
"""simple docstring"""
__UpperCamelCase = full_name.split('conv_layers.' )[-1]
__UpperCamelCase = name.split('.' )
__UpperCamelCase = int(items[0] )
__UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
__UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
__UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
__UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
__UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowercase )
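# conversion entry point: build the target HF model (CTC, sequence classification, or
# pretraining), load the fairseq checkpoint, and copy the weights across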
@torch.no_grad()
def _A ( _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase=True , _lowercase=False ) -> Dict:
"""simple docstring"""
if config_path is not None:
__UpperCamelCase = WavaVecaConfig.from_pretrained(_lowercase )
else:
__UpperCamelCase = WavaVecaConfig()
if is_seq_class:
__UpperCamelCase = read_txt_into_dict(_lowercase )
__UpperCamelCase = idalabel
__UpperCamelCase = WavaVecaForSequenceClassification(_lowercase )
__UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_lowercase , return_attention_mask=_lowercase , )
feature_extractor.save_pretrained(_lowercase )
elif is_finetuned:
if dict_path:
__UpperCamelCase = Dictionary.load(_lowercase )
        # important: change the bos & pad token ids, since the CTC blank symbol is <pad>
        # and not <s> as in fairseq
__UpperCamelCase = target_dict.pad_index
__UpperCamelCase = target_dict.bos_index
__UpperCamelCase = target_dict.eos_index
__UpperCamelCase = len(target_dict.symbols )
__UpperCamelCase = os.path.join(_lowercase , 'vocab.json' )
if not os.path.isdir(_lowercase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_lowercase ) )
return
os.makedirs(_lowercase , exist_ok=_lowercase )
__UpperCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
__UpperCamelCase = 0
__UpperCamelCase = 1
with open(_lowercase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(_lowercase , _lowercase )
__UpperCamelCase = WavaVecaCTCTokenizer(
_lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_lowercase , )
__UpperCamelCase = True if config.feat_extract_norm == 'layer' else False
__UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_lowercase , return_attention_mask=_lowercase , )
__UpperCamelCase = WavaVecaProcessor(feature_extractor=_lowercase , tokenizer=_lowercase )
processor.save_pretrained(_lowercase )
__UpperCamelCase = WavaVecaForCTC(_lowercase )
else:
__UpperCamelCase = WavaVecaForPreTraining(_lowercase )
if is_finetuned or is_seq_class:
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
__UpperCamelCase = argparse.Namespace(task='audio_pretraining' )
__UpperCamelCase = fairseq.tasks.setup_task(_lowercase )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowercase )
__UpperCamelCase = model[0].eval()
recursively_load_weights(_lowercase , _lowercase , not is_finetuned )
hf_wavavec.save_pretrained(_lowercase )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
__snake_case = parser.parse_args()
__snake_case = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
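# example invocation (script and file names are hypothetical):
#   python convert_wav2vec2_checkpoint.py --checkpoint_path wav2vec_small_960h.pt \
#       --dict_path dict.ltr.txt --pytorch_dump_folder_path ./wav2vec2-base-960h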
| 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 1 | 1 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
A__ : Union[str, Any] =10
A__ : Dict =datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
A__ : Dict =datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10,
"""id""": list(range(__snake_case ) ),
}, features=__snake_case, )
return dataset
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Dict, __snake_case : Optional[int] ) -> int:
"""simple docstring"""
A__ : str =str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=__snake_case )
return filename
# FILE_CONTENT + files
__snake_case = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> int:
"""simple docstring"""
A__ : List[str] =tmp_path_factory.mktemp("""data""" ) / """file.txt"""
A__ : List[Any] =FILE_CONTENT
with open(__snake_case, """w""" ) as f:
f.write(__snake_case )
return filename
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : List[Any] ) -> str:
"""simple docstring"""
import bza
A__ : Union[str, Any] =tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
A__ : str =bytes(__snake_case, """utf-8""" )
with bza.open(__snake_case, """wb""" ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> int:
"""simple docstring"""
import gzip
A__ : Dict =str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
A__ : List[str] =bytes(__snake_case, """utf-8""" )
with gzip.open(__snake_case, """wb""" ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
import lza.frame
A__ : List[str] =tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
A__ : List[Any] =bytes(__snake_case, """utf-8""" )
with lza.frame.open(__snake_case, """wb""" ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : List[Any] ) -> List[str]:
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
A__ : Union[str, Any] =tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with pyazr.SevenZipFile(__snake_case, """w""" ) as archive:
archive.write(__snake_case, arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : List[Any] ) -> int:
"""simple docstring"""
import tarfile
A__ : List[str] =tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(__snake_case, """w""" ) as f:
f.add(__snake_case, arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : List[str] ) -> str:
"""simple docstring"""
import lzma
A__ : Union[str, Any] =tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
A__ : str =bytes(__snake_case, """utf-8""" )
with lzma.open(__snake_case, """wb""" ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : List[str], __snake_case : str ) -> Union[str, Any]:
"""simple docstring"""
import zipfile
A__ : Any =tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(__snake_case, """w""" ) as f:
f.write(__snake_case, arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Dict ) -> List[str]:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
A__ : Optional[int] =tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
A__ : Any =bytes(__snake_case, """utf-8""" )
with zstd.open(__snake_case, """wb""" ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
A__ : Dict =tmp_path_factory.mktemp("""data""" ) / """file.xml"""
A__ : Optional[Any] =textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(__snake_case, """w""" ) as f:
f.write(__snake_case )
return filename
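# small in-memory tables shared by the file-based dataset fixtures below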
__snake_case = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
__snake_case = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
__snake_case = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
__snake_case = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
__snake_case = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Dict ) -> Tuple:
"""simple docstring"""
A__ : Dict =datasets.Dataset.from_dict(__snake_case )
A__ : Dict =str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=__snake_case )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
A__ : List[str] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlitea.connect(__snake_case ) ) as con:
A__ : Any =con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""", tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
A__ : Optional[Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(__snake_case, """w""", newline="""""" ) as f:
A__ : Tuple =csv.DictWriter(__snake_case, fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ : str =str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(__snake_case, """w""", newline="""""" ) as f:
A__ : str =csv.DictWriter(__snake_case, fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : str, __snake_case : int ) -> Tuple:
"""simple docstring"""
import bza
A__ : List[str] =tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(__snake_case, """rb""" ) as f:
A__ : Optional[Any] =f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(__snake_case, """wb""" ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : Optional[int], __snake_case : Dict ) -> List[str]:
"""simple docstring"""
A__ : List[Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__snake_case, """w""" ) as f:
f.write(__snake_case, arcname=os.path.basename(__snake_case ) )
f.write(__snake_case, arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Any, __snake_case : List[Any], __snake_case : str ) -> Optional[int]:
"""simple docstring"""
A__ : Tuple =tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(__snake_case, """w""" ) as f:
f.write(__snake_case, arcname=os.path.basename(csv_path.replace(""".csv""", """.CSV""" ) ) )
f.write(__snake_case, arcname=os.path.basename(csva_path.replace(""".csv""", """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Tuple, __snake_case : Any ) -> str:
"""simple docstring"""
A__ : str =tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(__snake_case, """w""" ) as f:
f.write(__snake_case, arcname=os.path.join("""main_dir""", os.path.basename(__snake_case ) ) )
f.write(__snake_case, arcname=os.path.join("""main_dir""", os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Dict:
"""simple docstring"""
A__ : Optional[Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
A__ : Optional[int] =pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(__snake_case, """wb""" ) as f:
A__ : List[str] =pq.ParquetWriter(__snake_case, schema=__snake_case )
A__ : Tuple =pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__snake_case ) )] for k in DATA[0]}, schema=__snake_case )
writer.write_table(__snake_case )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Tuple ) -> str:
"""simple docstring"""
A__ : Union[str, Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
A__ : List[Any] ={"""data""": DATA}
with open(__snake_case, """w""" ) as f:
json.dump(__snake_case, __snake_case )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Dict ) -> Optional[int]:
"""simple docstring"""
A__ : Tuple =str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
A__ : Dict ={"""data""": DATA_DICT_OF_LISTS}
with open(__snake_case, """w""" ) as f:
json.dump(__snake_case, __snake_case )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Dict ) -> int:
"""simple docstring"""
A__ : List[str] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(__snake_case, """w""" ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Tuple ) -> str:
"""simple docstring"""
A__ : List[str] =str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(__snake_case, """w""" ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : str ) -> Dict:
"""simple docstring"""
A__ : str =str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(__snake_case, """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(__snake_case ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Dict ) -> Dict:
"""simple docstring"""
A__ : Optional[int] =str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(__snake_case, """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(__snake_case ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Optional[int] ) -> Optional[int]:
"""simple docstring"""
import gzip
A__ : Dict =str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(__snake_case, """rb""" ) as orig_file:
with gzip.open(__snake_case, """wb""" ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Optional[int], __snake_case : Tuple ) -> Any:
"""simple docstring"""
import gzip
A__ : int =str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(__snake_case, """rb""" ) as orig_file:
with gzip.open(__snake_case, """wb""" ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : List[str], __snake_case : str, __snake_case : Tuple ) -> Optional[int]:
"""simple docstring"""
A__ : List[Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(__snake_case, """w""" ) as f:
f.write(__snake_case, arcname=os.path.basename(__snake_case ) )
f.write(__snake_case, arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Tuple, __snake_case : Union[str, Any], __snake_case : Dict, __snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
A__ : Optional[Any] =tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(__snake_case, """w""" ) as f:
f.write(__snake_case, arcname=os.path.join("""nested""", os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : str, __snake_case : Dict ) -> Union[str, Any]:
"""simple docstring"""
A__ : Dict =tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(__snake_case, """w""" ) as f:
f.write(__snake_case, arcname=os.path.join("""main_dir""", os.path.basename(__snake_case ) ) )
f.write(__snake_case, arcname=os.path.join("""main_dir""", os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : str, __snake_case : Any ) -> List[str]:
"""simple docstring"""
A__ : List[Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(__snake_case, """w""" ) as f:
f.add(__snake_case, arcname=os.path.basename(__snake_case ) )
f.add(__snake_case, arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Dict, __snake_case : List[str], __snake_case : List[str], __snake_case : Any ) -> Union[str, Any]:
"""simple docstring"""
A__ : Any =tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(__snake_case, """w""" ) as f:
f.add(__snake_case, arcname=os.path.join("""nested""", os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Optional[Any] ) -> Any:
"""simple docstring"""
A__ : int =["""0""", """1""", """2""", """3"""]
A__ : Any =str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(__snake_case, """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : str ) -> Optional[Any]:
"""simple docstring"""
A__ : Union[str, Any] =["""0""", """1""", """2""", """3"""]
A__ : Tuple =str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(__snake_case, """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Tuple ) -> Optional[int]:
"""simple docstring"""
A__ : Optional[int] =["""0""", """1""", """2""", """3"""]
A__ : Dict =tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(__snake_case, """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Optional[Any], __snake_case : Any, __snake_case : int ) -> str:
"""simple docstring"""
A__ : Union[str, Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(__snake_case, """w""" ) as f:
f.write(__snake_case, arcname=os.path.basename(__snake_case ) )
f.write(__snake_case, arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : str, __snake_case : Union[str, Any], __snake_case : Union[str, Any] ) -> List[str]:
"""simple docstring"""
A__ : str =tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(__snake_case, """w""" ) as f:
f.write(__snake_case, arcname=os.path.join("""main_dir""", os.path.basename(__snake_case ) ) )
f.write(__snake_case, arcname=os.path.join("""main_dir""", os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Union[str, Any], __snake_case : List[Any], __snake_case : Tuple ) -> int:
"""simple docstring"""
A__ : Any =tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(__snake_case, """w""" ) as f:
f.write(__snake_case, arcname=os.path.basename("""unsupported.ext""" ) )
f.write(__snake_case, arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : List[str] ) -> Union[str, Any]:
"""simple docstring"""
A__ : List[str] ="""\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
A__ : Tuple =str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(__snake_case, """w""", encoding="""utf-8""" ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( ) -> Dict:
"""simple docstring"""
return os.path.join("""tests""", """features""", """data""", """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( ) -> List[str]:
"""simple docstring"""
return os.path.join("""tests""", """features""", """data""", """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : int, __snake_case : List[str] ) -> int:
"""simple docstring"""
A__ : Union[str, Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(__snake_case, """w""" ) as f:
f.write(__snake_case, arcname=os.path.basename(__snake_case ) )
f.write(__snake_case, arcname=os.path.basename(__snake_case ).replace(""".jpg""", """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def __lowerCamelCase ( __snake_case : Optional[int] ) -> str:
"""simple docstring"""
A__ : Dict =tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""", """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """subdir""" / """test.txt""", """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""", """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""", """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """.subdir""" / """test.txt""", """w""" ) as f:
f.write("""bar\n""" * 10 )
return data_dir
| 718 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__snake_case : Union[str, Any] = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Any = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
__snake_case : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 687 | 0 |
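# undirected weighted graph stored as a nested adjacency dict; the static method at the
# end of the class computes a minimum spanning tree with Boruvka's algorithm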
class UpperCamelCase:
def __init__( self : Any ) -> List[str]:
'''simple docstring'''
__snake_case = 0
__snake_case = 0
__snake_case = {}
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE : Any ) -> Optional[int]:
'''simple docstring'''
if vertex not in self.adjacency:
__snake_case = {}
self.num_vertices += 1
def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple ) -> Tuple:
'''simple docstring'''
self.add_vertex(SCREAMING_SNAKE_CASE )
self.add_vertex(SCREAMING_SNAKE_CASE )
if head == tail:
return
__snake_case = weight
__snake_case = weight
def SCREAMING_SNAKE_CASE_ ( self : str ) -> int:
'''simple docstring'''
__snake_case = self.get_edges()
for edge in edges:
__snake_case , __snake_case , __snake_case = edge
edges.remove((tail, head, weight) )
for i in range(len(SCREAMING_SNAKE_CASE ) ):
__snake_case = list(edges[i] )
edges.sort(key=lambda SCREAMING_SNAKE_CASE : e[2] )
for i in range(len(SCREAMING_SNAKE_CASE ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
__snake_case = edges[i][2] + 1
for edge in edges:
__snake_case , __snake_case , __snake_case = edge
__snake_case = weight
__snake_case = weight
def __str__( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
__snake_case = self.adjacency[head][tail]
string += f'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
__snake_case = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Tuple=None ) -> Tuple:
'''simple docstring'''
__snake_case = Graph()
if vertices is None:
__snake_case = []
if edges is None:
__snake_case = []
for vertex in vertices:
g.add_vertex(SCREAMING_SNAKE_CASE )
for edge in edges:
g.add_edge(*SCREAMING_SNAKE_CASE )
return g
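    # disjoint-set (union-find) helper with path compression and union by rank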
class UpperCamelCase:
def __init__( self : Any ) -> Optional[Any]:
'''simple docstring'''
__snake_case = {}
__snake_case = {}
def __len__( self : Optional[int] ) -> int:
'''simple docstring'''
return len(self.parent )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE : Any ) -> List[str]:
'''simple docstring'''
if item in self.parent:
return self.find(SCREAMING_SNAKE_CASE )
__snake_case = item
__snake_case = 0
return item
def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if item not in self.parent:
return self.make_set(SCREAMING_SNAKE_CASE )
if item != self.parent[item]:
__snake_case = self.find(self.parent[item] )
return self.parent[item]
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] ) -> Dict:
'''simple docstring'''
__snake_case = self.find(SCREAMING_SNAKE_CASE )
__snake_case = self.find(SCREAMING_SNAKE_CASE )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
__snake_case = roota
return roota
if self.rank[roota] < self.rank[roota]:
__snake_case = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
__snake_case = roota
return roota
return None
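    # Boruvka's algorithm: repeatedly attach the cheapest edge leaving each
    # component and merge components until a single spanning tree remains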
@staticmethod
def SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE : Dict ) -> Tuple:
'''simple docstring'''
__snake_case = graph.num_vertices
__snake_case = Graph.UnionFind()
__snake_case = []
while num_components > 1:
__snake_case = {}
for vertex in graph.get_vertices():
__snake_case = -1
__snake_case = graph.get_edges()
for edge in edges:
__snake_case , __snake_case , __snake_case = edge
edges.remove((tail, head, weight) )
for edge in edges:
__snake_case , __snake_case , __snake_case = edge
__snake_case = union_find.find(SCREAMING_SNAKE_CASE )
__snake_case = union_find.find(SCREAMING_SNAKE_CASE )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
__snake_case = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
__snake_case = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
__snake_case , __snake_case , __snake_case = cheap_edge[vertex]
if union_find.find(SCREAMING_SNAKE_CASE ) != union_find.find(SCREAMING_SNAKE_CASE ):
union_find.union(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
mst_edges.append(cheap_edge[vertex] )
__snake_case = num_components - 1
__snake_case = Graph.build(edges=SCREAMING_SNAKE_CASE )
return mst
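# usage sketch: Graph.build(vertices=[...], edges=[(tail, head, weight), ...]) constructs
# a graph, and the static method above returns its minimum spanning tree as a new Graph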
| 371 |
def _lowerCAmelCase ( _lowerCAmelCase = 100 ) -> int:
'''simple docstring'''
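    # closed forms: the sum of the first n squares is n(n+1)(2n+1)/6 and the
    # square of the sum of the first n integers is (n(n+1)/2) ** 2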
__snake_case = n * (n + 1) * (2 * n + 1) / 6
__snake_case = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 371 | 1 |
def snake_case (UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
return abs(UpperCamelCase ) if a == 0 else greatest_common_divisor(b % a , UpperCamelCase )
def snake_case (UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
    while y:  # when y becomes 0 the loop terminates and x is returned as the final GCD.
lowerCamelCase__ , lowerCamelCase__ = y, x % y
return abs(UpperCamelCase )
def snake_case ():
'''simple docstring'''
try:
lowerCamelCase__ = input("""Enter two integers separated by comma (,): """ ).split(""",""" )
lowerCamelCase__ = int(nums[0] )
lowerCamelCase__ = int(nums[1] )
print(
f'''greatest_common_divisor({num_a}, {num_a}) = '''
f'''{greatest_common_divisor(UpperCamelCase , UpperCamelCase )}''' )
print(f'''By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(UpperCamelCase , UpperCamelCase )}''' )
except (IndexError, UnboundLocalError, ValueError):
print("""Wrong input""" )
if __name__ == "__main__":
main()
| 235 |
def snake_case (UpperCamelCase : int , UpperCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3317044064679887385961981 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
lowerCamelCase__ = [
2047,
1373653,
25326001,
3215031751,
2152302898747,
3474749660383,
341550071728321,
1,
3825123056546413051,
1,
1,
318665857834031151167461,
3317044064679887385961981,
]
lowerCamelCase__ = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(UpperCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
lowerCamelCase__ = primes[:idx]
break
lowerCamelCase__ , lowerCamelCase__ = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
lowerCamelCase__ = False
for r in range(UpperCamelCase ):
lowerCamelCase__ = pow(UpperCamelCase , d * 2**r , UpperCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
lowerCamelCase__ = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def snake_case ():
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 235 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
A = logging.get_logger(__name__)
class __snake_case ( a__):
def __init__( self, *A, **A ):
"""simple docstring"""
warnings.warn(
'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PoolFormerImageProcessor instead.', A, )
super().__init__(*A, **A )
| 320 |
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case ( lowerCAmelCase ):
def __init__( self ,snake_case ,snake_case=13 ,snake_case=7 ,snake_case=True ,snake_case=True ,snake_case=True ,snake_case=True ,snake_case=99 ,snake_case=32 ,snake_case=5 ,snake_case=4 ,snake_case=37 ,snake_case="gelu" ,snake_case=0.1 ,snake_case=0.1 ,snake_case=512 ,snake_case=16 ,snake_case=2 ,snake_case=0.02 ,snake_case=False ,snake_case=True ,snake_case="None" ,snake_case=3 ,snake_case=4 ,snake_case=None ,):
'''simple docstring'''
lowercase : List[Any] = parent
lowercase : int = batch_size
lowercase : Any = seq_length
lowercase : Dict = is_training
lowercase : int = use_input_mask
lowercase : Optional[int] = use_token_type_ids
lowercase : Optional[Any] = use_labels
lowercase : Union[str, Any] = vocab_size
lowercase : List[str] = hidden_size
lowercase : str = num_hidden_layers
lowercase : Union[str, Any] = num_attention_heads
lowercase : List[str] = intermediate_size
lowercase : Union[str, Any] = hidden_act
lowercase : List[str] = hidden_dropout_prob
lowercase : Tuple = attention_probs_dropout_prob
lowercase : Optional[int] = max_position_embeddings
lowercase : Tuple = type_vocab_size
lowercase : List[str] = type_sequence_label_size
lowercase : Union[str, Any] = initializer_range
lowercase : str = num_labels
lowercase : Tuple = num_choices
lowercase : List[str] = relative_attention
lowercase : List[Any] = position_biased_input
lowercase : List[str] = pos_att_type
lowercase : Tuple = scope
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase : List[str] = None
if self.use_input_mask:
lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
lowercase : List[Any] = None
if self.use_token_type_ids:
lowercase : str = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
lowercase : Optional[int] = None
lowercase : Dict = None
lowercase : str = None
if self.use_labels:
lowercase : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowercase : int = ids_tensor([self.batch_size] ,self.num_choices )
lowercase : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,relative_attention=self.relative_attention ,position_biased_input=self.position_biased_input ,pos_att_type=self.pos_att_type ,)
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) ,[] )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Tuple = DebertaVaModel(config=snake_case )
model.to(snake_case )
model.eval()
lowercase : str = model(snake_case ,attention_mask=snake_case ,token_type_ids=snake_case )[0]
lowercase : Tuple = model(snake_case ,token_type_ids=snake_case )[0]
lowercase : Optional[Any] = model(snake_case )[0]
self.parent.assertListEqual(list(sequence_output.size() ) ,[self.batch_size, self.seq_length, self.hidden_size] )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Optional[int] = DebertaVaForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
lowercase : List[str] = model(snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Any = self.num_labels
lowercase : Dict = DebertaVaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
lowercase : Dict = model(snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,labels=snake_case )
self.parent.assertListEqual(list(result.logits.size() ) ,[self.batch_size, self.num_labels] )
self.check_loss_output(snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : int = self.num_labels
lowercase : Optional[int] = DebertaVaForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
lowercase : List[Any] = model(snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Tuple = DebertaVaForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
lowercase : Tuple = model(
snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,start_positions=snake_case ,end_positions=snake_case ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Optional[Any] = DebertaVaForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
lowercase : str = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowercase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowercase : str = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowercase : Optional[Any] = model(
snake_case ,attention_mask=snake_case ,token_type_ids=snake_case ,labels=snake_case ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Dict = self.prepare_config_and_inputs()
        ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) : List[str] = config_and_inputs
lowercase : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __snake_case ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
_a : Dict= (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
_a : str= (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
_a : Any= True
_a : List[Any]= False
_a : List[Any]= False
_a : Any= False
_a : Optional[Any]= False
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Any = DebertaVaModelTester(self )
lowercase : List[Any] = ConfigTester(self ,config_class=snake_case ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*snake_case )
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Optional[Any] = DebertaVaModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
lowercase : Any = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
lowercase : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase : str = model(snake_case ,attention_mask=snake_case )[0]
# compare the actual values for a slice.
lowercase : Tuple = torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,snake_case ,atol=1e-4 ) ,f"{output[:, 1:4, 1:4]}" )
| 336 | 0 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
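# Usage sketch (assuming this file is the onnx package's __init__, as the names
# suggest): because a _LazyModule is registered above, a consumer can write
#
#   from transformers.onnx import OnnxConfig, export
#
# and the `config`/`convert` submodules are only imported on first attribute access.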
| 106 |
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None
try:
    import msvcrt
except ImportError:
    msvcrt = None
try:
    import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]
__version__ = "3.0.12"
_logger = None
def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired within *timeout* seconds."""
    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None
    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    """Context-manager proxy returned by BaseFileLock.acquire()."""
    def __init__(self, lock):
        self.lock = lock
        return None
    def __enter__(self):
        return self.lock
    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        return self._lock_file
    @property
    def timeout(self):
        return self._timeout
    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None
    def _acquire(self):
        raise NotImplementedError()
    def _release(self):
        raise NotImplementedError()
    @property
    def is_locked(self):
        return self._lock_file_fd is not None
def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : List[str]=0.05 ) -> int:
'''simple docstring'''
if timeout is None:
UpperCamelCase__ : int = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
UpperCamelCase__ : int = id(self )
UpperCamelCase__ : List[Any] = self._lock_file
UpperCamelCase__ : Dict = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(F"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(lowerCamelCase__ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
UpperCamelCase__ : List[Any] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Optional[int]=False ) -> Any:
'''simple docstring'''
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
UpperCamelCase__ : List[str] = id(self )
UpperCamelCase__ : Union[str, Any] = self._lock_file
logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
UpperCamelCase__ : Optional[Any] = 0
logger().debug(F"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
self.acquire()
return self
def __exit__( self : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : Any ) -> Optional[Any]:
'''simple docstring'''
self.release()
return None
def __del__( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
self.release(force=lowerCamelCase__ )
return None
def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : int ) -> str:
'''simple docstring'''
UpperCamelCase__ : str = os.path.basename(lowerCamelCase__ )
if len(lowerCamelCase__ ) > max_length and max_length > 0:
UpperCamelCase__ : Optional[int] = os.path.dirname(lowerCamelCase__ )
UpperCamelCase__ : List[str] = str(hash(lowerCamelCase__ ) )
UpperCamelCase__ : Optional[int] = filename[: max_length - len(lowerCamelCase__ ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(lowerCamelCase__ , lowerCamelCase__ )
else:
return path
class WindowsFileLock(BaseFileLock):
    """Uses the msvcrt.locking function to hard lock the lock file on Windows systems."""
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)
    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None
    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)
        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the fcntl.flock to hard lock the lock file on unix systems."""
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None
    def _release(self):
        # Do not remove the lockfile:
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn("only soft file lock is available")
| 106 | 1 |
"""simple docstring"""
def exchange_sort(numbers):
    """Sorts *numbers* in place by exchanging out-of-order pairs and returns it."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
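# Worked example (in-place behavior):
#
#   data = [5, 3, 1, 4, 2]
#   exchange_sort(data)   # returns [1, 2, 3, 4, 5]
#   data                  # the input list is now [1, 2, 3, 4, 5] as well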
| 277 | """simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 277 | 1 |
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Returns all k-element combinations of 1..n in lexicographic order."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result
def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """Recursively extends *current_list* while *level* more picks remain."""
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()
def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
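# Worked example: generate_all_combinations(4, 2) returns, in lexicographic
# order, [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]] -- i.e. C(4, 2) = 6
# combinations, matching the demo above.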
| 255 |
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Returns the Hubble parameter H(z) for the given density parameters."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("""All input parameters must be positive""")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("""Relative densities cannot be greater than one""")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_a = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_a ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
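# Worked check: with the demo inputs above (flat universe, z = 0) the density
# terms sum to 1, so E(0) = 1 and hubble_parameter(...) returns exactly the
# input Hubble constant, 68.3.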
| 255 | 1 |
def solution(power: int = 10_00) -> int:
    """Returns the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
if __name__ == "__main__":
    power = int(input('''Enter the power of 2: ''').strip())
    print('''2 ^ ''', power, ''' = ''', 2**power)
    result = solution(power)
    print('''Sum of the digits is: ''', result)
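# Worked example: solution(15) computes 2**15 = 32768 and returns
# 3 + 2 + 7 + 6 + 8 = 26.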
| 1 |
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first non-negative int found among *env_keys*, else *default*."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default
def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
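# Usage sketch (the environment variable names below are illustrative only):
#
#   os.environ["WORLD_SIZE"] = "4"
#   get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], 1)        # -> 4
#   parse_flag_from_env("USE_FP16", default=False)             # -> False
#   parse_choice_from_env("MIXED_PRECISION", default="no")     # -> "no"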
| 428 | 0 |
from __future__ import annotations
from random import random
class Node:
    """A treap node: stores a value plus a random heap priority."""
    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left = None
        self.right = None
    def __repr__(self):
        from pprint import pformat
        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)
    def __str__(self):
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Splits the treap into nodes with keys < value and keys >= value."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merges two treaps assuming every key in *left* is smaller than in *right*."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)
def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. ")
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
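# Programmatic sketch (bypassing the interactive loop above):
#
#   root = None
#   for value in (5, 3, 8):
#       root = insert(root, value)
#   inorder(root)  # prints: 3,5,8,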
| 89 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __magic_name__ ( unittest.TestCase):
'''simple docstring'''
def __init__( self: Union[str, Any] , _lowerCamelCase: List[str] , _lowerCamelCase: Union[str, Any]=7 , _lowerCamelCase: int=3 , _lowerCamelCase: Optional[int]=18 , _lowerCamelCase: Optional[Any]=30 , _lowerCamelCase: Any=4_00 , _lowerCamelCase: List[str]=True , _lowerCamelCase: str=None , _lowerCamelCase: Union[str, Any]=True , _lowerCamelCase: Union[str, Any]=False , _lowerCamelCase: int=True , _lowerCamelCase: Any=True , _lowerCamelCase: List[str]=[0.5, 0.5, 0.5] , _lowerCamelCase: Dict=[0.5, 0.5, 0.5] , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = min_resolution
SCREAMING_SNAKE_CASE_ = max_resolution
SCREAMING_SNAKE_CASE_ = do_resize
SCREAMING_SNAKE_CASE_ = size if size is not None else {'''height''': 18, '''width''': 20}
SCREAMING_SNAKE_CASE_ = do_thumbnail
SCREAMING_SNAKE_CASE_ = do_align_axis
SCREAMING_SNAKE_CASE_ = do_pad
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = image_mean
SCREAMING_SNAKE_CASE_ = image_std
def _A ( self: str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __magic_name__ ( __UpperCAmelCase , unittest.TestCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = DonutImageProcessor if is_vision_available() else None
def _A ( self: Optional[int] ):
SCREAMING_SNAKE_CASE_ = DonutImageProcessingTester(self )
@property
def _A ( self: List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self: Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''size''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_thumbnail''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_pad''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''image_std''' ) )
def _A ( self: List[str] ):
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def _A ( self: List[Any] ):
pass
@is_flaky()
def _A ( self: int ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _A ( self: Optional[Any] ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _A ( self: List[str] ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
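# Outside the test harness, the image processor under test would be used
# roughly like this (a sketch; the inputs are illustrative):
#
#   image_processor = DonutImageProcessor(size={"height": 18, "width": 20})
#   pixel_values = image_processor(images, return_tensors="pt").pixel_values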
| 89 | 1 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
TEST_FLUSH = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
TEST_STRAIGHT = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
TEST_TYPES = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def generate_random_hand():
    """Generates a random hand/other pair together with the expected outcome."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 1_0_0):
    """Yields *number_of_hands* random comparison cases."""
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize('hand, expected', TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected
@pytest.mark.parametrize('hand, expected', TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values', TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected', TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected', TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected
@pytest.mark.parametrize('hand, other, expected', TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize('hand, other, expected', generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand('2D AC 3H 4H 5S'), PokerHand('2S 3H 4H 5S 6C')]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand('2C 4S AS 3D 5C')
    expected = True
    expected_card_values = [5, 4, 3, 2, 1_4]
    for _ in range(1_0):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, 'poker_hands.txt')
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:1_4].strip()
            opponent_hand = line[1_5:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 3_7_6
| 56 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ ( UpperCamelCase__ ):
def __init__( self :Union[str, Any] , __A :Optional[int] , __A :Tuple=13 , __A :Dict=7 , __A :Dict=True , __A :str=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Any=False , __A :Dict=False , __A :Any=False , __A :Tuple=2 , __A :Dict=99 , __A :Optional[Any]=0 , __A :List[str]=32 , __A :Optional[int]=5 , __A :Dict=4 , __A :List[str]=0.1 , __A :Union[str, Any]=0.1 , __A :Tuple=512 , __A :Any=12 , __A :Optional[int]=2 , __A :Union[str, Any]=0.0_2 , __A :Dict=3 , __A :Optional[int]=4 , __A :Any="last" , __A :List[Any]=None , __A :Any=None , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_input_lengths
SCREAMING_SNAKE_CASE__ = use_token_type_ids
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = gelu_activation
SCREAMING_SNAKE_CASE__ = sinusoidal_embeddings
SCREAMING_SNAKE_CASE__ = causal
SCREAMING_SNAKE_CASE__ = asm
SCREAMING_SNAKE_CASE__ = n_langs
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = n_special
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = num_choices
SCREAMING_SNAKE_CASE__ = summary_type
SCREAMING_SNAKE_CASE__ = use_proj
SCREAMING_SNAKE_CASE__ = scope
def _snake_case ( self :Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ = None
if self.use_input_lengths:
SCREAMING_SNAKE_CASE__ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
SCREAMING_SNAKE_CASE__ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , 2 ).float()
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _snake_case ( self :List[str] ) -> Optional[int]:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _snake_case ( self :Tuple , __A :str , __A :int , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[int] , __A :Union[str, Any] , __A :Union[str, Any] , __A :str , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertModel(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A , lengths=__A , langs=__A )
SCREAMING_SNAKE_CASE__ = model(__A , langs=__A )
SCREAMING_SNAKE_CASE__ = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self :str , __A :Any , __A :str , __A :Union[str, Any] , __A :Optional[Any] , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[Any] , __A :Union[str, Any] , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertWithLMHeadModel(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self :Tuple , __A :Union[str, Any] , __A :Optional[Any] , __A :Dict , __A :Dict , __A :Union[str, Any] , __A :List[str] , __A :Optional[int] , __A :int , __A :str , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnsweringSimple(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A )
SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self :List[str] , __A :Any , __A :int , __A :Tuple , __A :Optional[Any] , __A :Tuple , __A :Optional[int] , __A :str , __A :int , __A :str , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnswering(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A )
SCREAMING_SNAKE_CASE__ = model(
__A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , p_mask=__A , )
SCREAMING_SNAKE_CASE__ = model(
__A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , )
((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple()
SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A )
((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _snake_case ( self :Optional[int] , __A :str , __A :Optional[int] , __A :Tuple , __A :Dict , __A :List[str] , __A :Tuple , __A :List[str] , __A :Dict , __A :List[str] , ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertForSequenceClassification(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A )
SCREAMING_SNAKE_CASE__ = model(__A , labels=__A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self :Optional[Any] , __A :Optional[Any] , __A :Optional[Any] , __A :List[str] , __A :Optional[Any] , __A :int , __A :Tuple , __A :Optional[int] , __A :Union[str, Any] , __A :Dict , ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = FlaubertForTokenClassification(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self :str , __A :Any , __A :Tuple , __A :List[str] , __A :Tuple , __A :Any , __A :int , __A :Dict , __A :List[str] , __A :Tuple , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.num_choices
SCREAMING_SNAKE_CASE__ = FlaubertForMultipleChoice(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self :Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE__ = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase_ = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase_ = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _snake_case ( self :Any , __A :Optional[int] , __A :Optional[int] , __A :Dict , __A :List[Any] , __A :Tuple ) -> str:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _snake_case ( self :Tuple , __A :List[str] , __A :Optional[int] , __A :Dict=False ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
SCREAMING_SNAKE_CASE__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
SCREAMING_SNAKE_CASE__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
return inputs_dict
def _snake_case ( self :str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__A , emb_dim=37 )
def _snake_case ( self :int ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def _snake_case ( self :Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__A )
def _snake_case ( self :Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__A )
def _snake_case ( self :str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*__A )
def _snake_case ( self :Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__A )
def _snake_case ( self :str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__A )
def _snake_case ( self :Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*__A )
def _snake_case ( self :Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*__A )
@slow
def _snake_case ( self :Union[str, Any] ) -> List[str]:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@slow
@require_torch_gpu
def _snake_case ( self :Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = model_class(config=__A )
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__A , __A )
SCREAMING_SNAKE_CASE__ = torch.jit.trace(
__A , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__A , os.path.join(__A , """traced_model.pt""" ) )
SCREAMING_SNAKE_CASE__ = torch.jit.load(os.path.join(__A , """traced_model.pt""" ) , map_location=__A )
loaded(inputs_dict["""input_ids"""].to(__A ) , inputs_dict["""attention_mask"""].to(__A ) )
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def _snake_case ( self :Dict ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
SCREAMING_SNAKE_CASE__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(__A )[0]
SCREAMING_SNAKE_CASE__ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __A )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1E-4 ) )
 | 6 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
a__ : Dict = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 703 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[Any] = None
A : Optional[int] = None
@property
def UpperCAmelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(lowerCAmelCase , 'feature_size'))
self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate'))
self.assertTrue(hasattr(lowerCAmelCase , 'padding_value'))
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(lowerCAmelCase) == len(lowerCAmelCase) for x, y in zip(lowerCAmelCase , processed_features[input_name])))
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='np')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='tf')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
def UpperCAmelCase ( self : str , lowerCAmelCase : str=False) -> Union[str, Any]:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = self.feat_extract_tester.seq_length_diff
lowercase__ = self.feat_extract_tester.max_seq_length + pad_diff
lowercase__ = self.feat_extract_tester.min_seq_length
lowercase__ = self.feat_extract_tester.batch_size
lowercase__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , padding=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest')
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1]))
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
lowercase__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length')[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
self.assertTrue(all(len(lowerCAmelCase) % 10 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
lowercase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(lowerCAmelCase) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
lowercase__ = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1E-3)
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )[input_name]
        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))[
            input_name
        ]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_1 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )[input_name]
        input_2 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertTrue(input_1.shape[1] == len(speech_inputs[0]))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to middle
        input_1 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )[input_name]
        input_2 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )[input_name]
        input_3 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )[input_name]

        self.assertTrue(input_1.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_are_equal(input_1, input_2))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_3))
        self.assertTrue(len(input_3[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_1 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )[input_name]
        input_2 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_1[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
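# Illustrative sketch (not part of the original test suite): the `pad_to_multiple_of`
# expectation in `_check_truncation` rounds a sequence length up to the next multiple.
# The same rule as a tiny standalone helper, assuming plain Python ints:
def _round_up_to_multiple(length: int, multiple: int) -> int:
    if length % multiple == 0:
        return length
    return ((length // multiple) + 1) * multiple

assert _round_up_to_multiple(1000, 12) == 1008  # 84 * 12
assert _round_up_to_multiple(96, 12) == 96      # already a multiple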
| 642 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images (one random RGB image in this case)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
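# A minimal usage sketch of the processor pattern exercised above. The directory
# is assumed to hold a processor saved with `save_pretrained`, as in the tests:
#
#     processor = ChineseCLIPProcessor.from_pretrained(tmpdir)
#     batch = processor(
#         text="Alexandra,T-shirt的价格是15便士。", images=image_inputs, return_tensors="pt"
#     )
#     # -> input_ids / token_type_ids / attention_mask from the tokenizer,
#     #    plus pixel_values from the image processor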
| 99 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
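# Why the module is structured this way: `_LazyModule` defers importing the heavy
# torch-backed submodules until an attribute is first accessed. A stripped-down
# sketch of the same idea (hypothetical, not the transformers implementation):
#
#     import importlib, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._class_to_module = {
#                 cls: mod for mod, classes in import_structure.items() for cls in classes
#             }
#
#         def __getattr__(self, name):
#             module = importlib.import_module("." + self._class_to_module[name], self.__name__)
#             return getattr(module, name)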
| 328 | 0 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
# Second-order (biquad) IIR filter designs; coefficients follow the standard
# audio EQ cookbook formulas.


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
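# Usage sketch for the factories above. It assumes IIRFilter exposes a
# per-sample `process` method (as in audio_filters.iir_filter):
if __name__ == "__main__":
    lowpass = make_lowpass(frequency=1_000, samplerate=48_000)
    # Feed a short ramp through the filter; a real pipeline would stream audio here.
    filtered = [lowpass.process(sample) for sample in (0.0, 0.25, 0.5, 0.75, 1.0)]
    print(filtered)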
| 718 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
    recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
set_seed(3 )
# generate train_data and objective_set
snake_case__ , snake_case__ = generate_datasets(
a , a , number=a , min_len=1026 , trim=a )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
snake_case__ = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# load pretrained model
    snake_case__ = load_gpt2("""gpt2""" ).to(a )
print("""computing perplexity on objective set""" )
snake_case__ = compute_perplexity(a , a , a ).item()
print("""perplexity on objective set:""" , a )
# collect igf pairs and save to file demo.jbl
collect_objective_set(a , a , a , a , a , a , a , a )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
set_seed(42 )
# Load pre-trained model
    snake_case__ = GPT2LMHeadModel.from_pretrained("""gpt2""" )
# Initialize secondary learner to use embedding weights of model
snake_case__ = SecondaryLearner(a )
# Train secondary learner
snake_case__ = train_secondary_learner(
a , a , max_epochs=a , batch_size=a , eval_freq=100 , igf_model_path=a , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
snake_case__ = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
snake_case__ = RandomSampler(a )
snake_case__ = DataLoader(a , sampler=a )
snake_case__ = max_steps // (len(a )) + 1
snake_case__ = 0
snake_case__ = torch.zeros((1, context_len) , dtype=torch.long , device=a )
snake_case__ , snake_case__ , snake_case__ = recopy_model(a , a , a )
model.train()
if secondary_learner is not None:
secondary_learner.to(a )
secondary_learner.eval()
snake_case__ = []
snake_case__ = 0
snake_case__ = []
snake_case__ = []
# Compute the performance of the transformer model at the beginning
snake_case__ = compute_perplexity(a , a , a )
test_perps.append(a )
print("""Test perplexity, step""" , a , """:""" , a )
for epoch in range(int(a ) ):
for step, example in enumerate(a ):
torch.cuda.empty_cache()
snake_case__ = random.randint(0 , example.size(2 ) - context_len - 1 )
snake_case__ = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
snake_case__ = model(a , labels=a )
snake_case__ = True
if secondary_learner is not None:
snake_case__ = secondary_learner.forward(
torch.tensor(a , dtype=torch.long , device=a ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(a ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
snake_case__ = -1
if predicted_q < threshold:
snake_case__ = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
snake_case__ = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
snake_case__ = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
snake_case__ = compute_perplexity(a , a , a )
test_perps.append(a )
print("""Test perplexity, step""" , a , """:""" , a )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , a )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def main():
snake_case__ = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
# Required parameters
parser.add_argument(
"""--data_dir""" , default=a , type=a , required=a , help="""The input data dir. Should contain data files for WikiText.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=a , type=a , required=a , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--data_file""" , type=a , default=a , help=(
"""A jbl file containing tokenized data which can be split as objective dataset, """
"""train_dataset and test_dataset."""
) , )
parser.add_argument(
"""--igf_data_file""" , type=a , default=a , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , )
parser.add_argument(
"""--output_dir""" , default=a , type=a , required=a , help="""The output directory where the final fine-tuned model is stored.""" , )
parser.add_argument(
"""--tokenizer_name""" , default=a , type=a , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument("""--seed""" , type=a , default=a , help="""A seed for reproducible training.""" )
parser.add_argument(
"""--context_len""" , default=32 , type=a , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--size_objective_set""" , default=100 , type=a , help="""number of articles that are long enough to be used as our objective set""" , )
parser.add_argument(
"""--eval_freq""" , default=100 , type=a , help="""secondary model evaluation is triggered at eval_freq""" )
parser.add_argument("""--max_steps""" , default=1000 , type=a , help="""To calculate training epochs""" )
parser.add_argument(
"""--secondary_learner_batch_size""" , default=128 , type=a , help="""batch size of training data for secondary learner""" , )
parser.add_argument(
"""--batch_size""" , default=16 , type=a , help="""batch size of training data of language model(gpt2) """ )
parser.add_argument(
"""--eval_interval""" , default=10 , type=a , help=(
"""decay the selectivity of our secondary learner filter from"""
"""1 standard deviation above average to 1 below average after 10 batches"""
) , )
parser.add_argument(
"""--number""" , default=100 , type=a , help="""The number of examples split to be used as objective_set/test_data""" )
parser.add_argument(
"""--min_len""" , default=1026 , type=a , help="""The minimum length of the article to be used as objective set""" )
parser.add_argument(
"""--secondary_learner_max_epochs""" , default=15 , type=a , help="""number of epochs to train secondary learner""" )
parser.add_argument("""--trim""" , default=a , type=a , help="""truncate the example if it exceeds context length""" )
parser.add_argument(
"""--threshold""" , default=1.0 , type=a , help=(
"""The threshold value used by secondary learner to filter the train_data and allow only"""
""" informative data as input to the model"""
) , )
parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=a , help="""finetuned_model_name""" )
parser.add_argument(
"""--recopy_model""" , default=a , type=a , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=a , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , )
# Load train data for secondary learner
snake_case__ = joblib.load("""data/IGF_values.jbl""" )
# Train secondary learner
snake_case__ = training_secondary_learner(
a , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , )
# load pretrained gpt2 model
    snake_case__ = GPT2LMHeadModel.from_pretrained("""gpt2""" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
snake_case__ , snake_case__ = generate_datasets(
context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1026 , trim=a )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
a , a , a , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=a , secondary_learner=a , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , )
if __name__ == "__main__":
main()
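# Hedged example of invoking this script (the filename `run_clm_igf.py` and the
# data paths are assumptions; the flags come from the argparse definitions above;
# note that `main` builds the parser but then runs the pipeline with hard-coded
# values, so these flags are illustrative):
#
#     python run_clm_igf.py \
#         --data_dir data \
#         --output_dir igf_output \
#         --data_file data/tokenized_stories_train_wikitext103.jbl \
#         --igf_data_file igf_context_pairs.jbl \
#         --max_steps 1000 --batch_size 16 --threshold 1.0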
| 99 | 0 |
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
lowerCamelCase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase__ : Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
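# Hedged invocation sketch (the script filename and paths are placeholders; the
# flags match the parser above; note the tokenizer logic expects the checkpoint
# path to end in `model.ckpt` so that `tf_checkpoint_path[:-10] + "vocab.txt"` works):
#
#     python convert_tapas_checkpoint.py \
#         --task WTQ \
#         --reset_position_index_per_cell \
#         --tf_checkpoint_path /path/to/model.ckpt \
#         --tapas_config_file /path/to/tapas_config.json \
#         --pytorch_dump_path /path/to/output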
| 238 |
"""simple docstring"""
import argparse
import datetime
def zeller(date_input: str) -> str:
    """
    Zeller's congruence: find the day of the week for a date given as
    "mm-dd-yyyy" or "mm/dd/yyyy".
    """
    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12

    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ : str = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
lowerCamelCase__ : int = parser.parse_args()
zeller(args.date_input)
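# Worked example of the congruence for "01-31-2010" (a Sunday), following the
# variable names in `zeller` above:
#   m = 1 -> treated as month 13 of the previous year, so m = 13, y = 2009
#   c = 20, k = 9
#   t = int(2.6 * 13 - 5.39) = 28, u = int(20 / 4) = 5, v = int(9 / 4) = 2
#   x = d + k = 31 + 9 = 40
#   z = 28 + 5 + 2 + 40 = 75, w = 75 - 2 * 20 = 35, f = 35 % 7 = 0 -> days["0"] == "Sunday"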
| 238 | 1 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant in [0.04, 0.06]
        window_size : size of the neighbourhood considered
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        """Returns the image with marked corners and the list of [x, y, r] responses."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the constant passed to the constructor
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
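# Quick self-contained demo sketch (the synthetic image is an assumption;
# `detect` takes a file path, so the array is written to a temporary file first):
#
#     import os, tempfile
#     img = np.zeros((64, 64), dtype=np.uint8)
#     img[20:40, 20:40] = 255  # white square; its 4 corners should respond strongly
#     path = os.path.join(tempfile.mkdtemp(), "square.png")
#     cv2.imwrite(path, img)
#     _, corners = HarrisCorner(0.04, 3).detect(path)
#     print(len(corners), "corner candidates")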
| 56 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = self.get_feature_extractor()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_lowerCamelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,_lowerCamelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
self.assertIsInstance(processor.decoder ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowercase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha ,5.0 )
self.assertEqual(processor.language_model.beta ,3.0 )
self.assertEqual(processor.language_model.score_boundary ,-7.0 )
self.assertEqual(processor.language_model.unk_score_offset ,3 )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_lowerCamelCase ,'''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCamelCase ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = floats_list((3, 1000) )
__lowercase = feature_extractor(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor(_lowerCamelCase ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = '''This is a test string'''
__lowercase = processor(text=_lowerCamelCase )
__lowercase = tokenizer(_lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
__lowercase = processor.decode(_lowerCamelCase )
__lowercase = decoder.decode_beams(_lowerCamelCase )[0]
self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' ,decoded_processor.text )
self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase = processor.batch_decode(_lowerCamelCase )
else:
with get_context(_lowerCamelCase ).Pool() as pool:
__lowercase = processor.batch_decode(_lowerCamelCase ,_lowerCamelCase )
__lowercase = list(_lowerCamelCase )
with get_context('''fork''' ).Pool() as p:
__lowercase = decoder.decode_beams_batch(_lowerCamelCase ,_lowerCamelCase )
__lowercase , __lowercase , __lowercase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCamelCase ,decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] ,decoded_processor.text )
self.assertListEqual(_lowerCamelCase ,decoded_processor.logit_score )
self.assertListEqual(_lowerCamelCase ,decoded_processor.lm_score )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor_text = decoded_processor_out.text

        logits = list(logits)
        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder_text = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_processor_text, decoded_decoder_text)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor_text)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
__lowercase = self._get_dummy_logits()
__lowercase = 2.0
__lowercase = 5.0
        __lowercase = -20.0
__lowercase = True
__lowercase = processor.batch_decode(
_lowerCamelCase ,alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
__lowercase = decoded_processor_out.text
__lowercase = list(_lowerCamelCase )
decoder.reset_params(
alpha=_lowerCamelCase ,beta=_lowerCamelCase ,unk_score_offset=_lowerCamelCase ,lm_score_boundary=_lowerCamelCase ,)
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_lowerCamelCase ,_lowerCamelCase ,)
__lowercase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] ,_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha ,2.0 )
self.assertEqual(lm_model.beta ,5.0 )
        self.assertEqual(lm_model.unk_score_offset, -20.0)
self.assertEqual(lm_model.score_boundary ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(_lowerCamelCase )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_lowerCamelCase )
__lowercase = os.listdir(_lowerCamelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = floats_list((3, 1000) )
__lowercase = processor_wavaveca(_lowerCamelCase ,return_tensors='''np''' )
__lowercase = processor_auto(_lowerCamelCase ,return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1E-2 )
__lowercase = self._get_dummy_logits()
__lowercase = processor_wavaveca.batch_decode(_lowerCamelCase )
__lowercase = processor_auto.batch_decode(_lowerCamelCase )
self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCamelCase ,feature_extractor=_lowerCamelCase ,decoder=_lowerCamelCase )
self.assertListEqual(
processor.model_input_names ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,)
@staticmethod
def _UpperCAmelCase (_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
__lowercase = [d[key] for d in offsets]
return retrieved_list
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()[0]
__lowercase = processor.decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ) ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''end_offset''' ) ,[1, 3, 5] )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()
__lowercase = processor.batch_decode(_lowerCamelCase ,output_word_offsets=_lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowerCamelCase ,_lowerCamelCase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_lowerCamelCase ,'''word''' ) ) for o in outputs['''word_offsets''']] ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''end_offset''' ) ,[1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
import torch
__lowercase = load_dataset('''common_voice''' ,'''en''' ,split='''train''' ,streaming=_lowerCamelCase )
__lowercase = ds.cast_column('''audio''' ,datasets.Audio(sampling_rate=16000 ) )
__lowercase = iter(_lowerCamelCase )
__lowercase = next(_lowerCamelCase )
__lowercase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
__lowercase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase = processor(sample['''audio''']['''array'''] ,return_tensors='''pt''' ).input_values
with torch.no_grad():
__lowercase = model(_lowerCamelCase ).logits.cpu().numpy()
__lowercase = processor.decode(logits[0] ,output_word_offsets=_lowerCamelCase )
        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate

        word_offsets = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_OUTPUT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_offsets, "word")), EXPECTED_OUTPUT)
        self.assertEqual(" ".join(self.get_from_offsets(word_offsets, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_offsets, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_offsets, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
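# The offset -> seconds conversion used in the last test, as a standalone sketch.
# The 16 kHz sampling rate comes from the test above; the concrete ratio of 320
# input samples per logit frame is an assumption typical of Wav2Vec2-style models:
def offsets_to_seconds(word_offsets, inputs_to_logits_ratio=320, sampling_rate=16_000):
    time_offset = inputs_to_logits_ratio / sampling_rate  # seconds per logit frame
    return [
        {
            "word": d["word"],
            "start_time": d["start_offset"] * time_offset,
            "end_time": d["end_offset"] * time_offset,
        }
        for d in word_offsets
    ]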
| 56 | 1 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ):
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep (Adams-Bashforth) combination of the stored derivatives
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
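# Sanity sketch: the 1/2/3/4-step combinations in `step` are the classic
# Adams-Bashforth weights, and each coefficient set must sum to 1 so that a
# constant derivative is integrated exactly:
if __name__ == "__main__":
    for coeffs in ([1.0], [3 / 2, -1 / 2], [23 / 12, -16 / 12, 5 / 12], [55 / 24, -59 / 24, 37 / 24, -9 / 24]):
        assert abs(sum(coeffs) - 1.0) < 1e-12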
| 428 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 428 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1_000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.")
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])
        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)
        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])
        self.sample = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        log_sigma = sigma.log()
        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output, timestep, sample, return_dict=True):
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`")
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat
            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            sample = self.sample
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps):
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
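# Usage sketch (my own illustration): this class matches diffusers'
# `KDPM2DiscreteScheduler`. Being a 2nd-order scheduler, it calls the denoiser
# twice per output step, which is why `timesteps` above is interleaved; the
# zero tensor below is a placeholder for a real UNet epsilon prediction.
if __name__ == "__main__":
    scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1_000)
    scheduler.set_timesteps(25)
    sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for a UNet call
        sample = scheduler.step(noise_pred, t, sample).prev_sample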
| 713 |
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
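# Worked example (my own check of the closed form EMI = P * r * (1 + r)**n / ((1 + r)**n - 1)):
# for a principal of 25_000 at 12% per annum over 3 years, r = 0.01 and n = 36,
# which gives an EMI of roughly 830.36 per month.
assert abs(equated_monthly_installments(25_000, 0.12, 3) - 830.3577) < 1e-3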
| 642 | 0 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """A do-nothing stand-in so the module imports even without vision deps."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)
        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)
        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )
        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )
        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"
        object_detector = pipeline("object-detection", model=model_id)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )
        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"
        object_detector = pipeline("object-detection", model=model_id)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )
    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993
        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
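# Outside the test harness the same pipeline can be used directly; a short
# sketch (the checkpoint and image URL are the ones exercised by the tests
# above; downloading the weights happens on first use):
if __name__ == "__main__":
    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg")
    for prediction in predictions:
        print(prediction["label"], prediction["score"], prediction["box"])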
| 36 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(self, image: np.ndarray, size: Dict[str, int], resample=PIL.Image.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image: np.ndarray, data_format=None):
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
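# Usage sketch (my own illustration; `MobileViTImageProcessor` is the class
# name assumed for the processor above, and the dummy array stands in for a
# real photo). With the defaults, resize to shortest edge 224 plus the 256x256
# center crop should yield pixel values of shape (1, 3, 256, 256).
if __name__ == "__main__":
    import numpy as np
    from PIL import Image as PILImage

    image_processor = MobileViTImageProcessor()
    img = PILImage.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
    inputs = image_processor(images=img, return_tensors="pt")
    print(inputs["pixel_values"].shape)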
| 36 | 1 |
def solution(length: int = 50) -> int:
    """Count the ways to fill a row of `length` units with unit squares and blocks of length 2 to 4."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
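# Sanity check (my own trace of the recurrence above): ways_number[n] adds, for
# every possible leftmost block length and start position, the count for the
# remaining suffix. For a five-unit row this gives fifteen tilings, which
# matches the example in Project Euler problem 117.
assert solution(5) == 15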
if __name__ == "__main__":
print(F'''{solution() = }''') | 382 |
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1
    return answer
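# Quick check (my own example): "abr" occurs twice in "abracadabra", once at
# the start and once at offset 7, and both hits have a z-value >= 3 in the
# concatenated string "abrabracadabra".
assert find_pattern("abr", "abracadabra") == 2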
if __name__ == "__main__":
import doctest
doctest.testmod() | 382 | 1 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
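# In user code this reader backs `Dataset.from_generator`; a minimal sketch of
# the public entry point (my own example data):
if __name__ == "__main__":
    from datasets import Dataset

    def gen():
        for i in range(3):
            yield {"idx": i, "text": f"example {i}"}

    ds = Dataset.from_generator(gen)
    print(len(ds), ds[0])  # 3 {'idx': 0, 'text': 'example 0'}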
| 40 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
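# The masked-LM integration test above corresponds to this everyday usage (a
# sketch using the same public checkpoint and sentence; weights are downloaded
# on first use):
if __name__ == "__main__":
    from transformers import pipeline

    fill_mask = pipeline("fill-mask", model="uw-madison/nystromformer-512")
    print(fill_mask("the [MASK] of Belgium is Brussels")[0]["token_str"])  # expected: "capital"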
| 432 | 0 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel

logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Log a 2D tensor, one row per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute per-head attention entropy and importance scores (|dL/d head_mask|)."""
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively zero out the least important heads until the score drops below the threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)
    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]
        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )
    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the masked heads and measure the effect on speed and score."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name", default="", type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir", default=None, type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
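# --- Illustrative sketch (added for clarity, not part of the original script) ---
# Once a head mask has been computed, heads can be removed permanently with the
# standard `prune_heads` API that `transformers` models expose. The checkpoint
# name and the layer/head indices below are made-up examples.
def _example_prune_heads():
    """Sketch only: never called by this script."""
    from transformers import GPT2LMHeadModel

    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Map layer index -> list of attention-head indices to drop in that layer.
    model.prune_heads({0: [0, 2], 5: [1]})
    return model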
| 715 |
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="""relu""")
)
# Step 2 - Pooling
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.Conv2D(32, (3, 3), activation="""relu"""))
classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="""relu"""))
classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
# Compiling the CNN
classifier.compile(
optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
training_set = train_datagen.flow_from_directory(
"""dataset/training_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
)
test_set = test_datagen.flow_from_directory(
"""dataset/test_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
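# Note (added): `fit_generator` is deprecated in TensorFlow 2.x; `fit` accepts
# generators directly. An equivalent call with the same objects would be:
#     classifier.fit(training_set, steps_per_epoch=5, epochs=30, validation_data=test_set)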
classifier.save("""cnn.h5""")
# Part 3 - Making new predictions
test_image = tf.keras.preprocessing.image.load_img(
"""dataset/single_prediction/image.png""", target_size=(64, 64)
)
test_image = tf.keras.preprocessing.image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
result = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
SCREAMING_SNAKE_CASE = """Normal"""
if result[0][0] == 1:
SCREAMING_SNAKE_CASE = """Abnormality detected"""
| 556 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict , encoder_only=False ):
new_state_dict = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith('head' ):
key = 'segformer.encoder.' + key
if key.startswith('backbone' ):
key = key.replace('backbone' , 'segformer.encoder' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
idx = key[key.find('patch_embed' ) + len('patch_embed' )]
key = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(idx )-1}' )
if "norm" in key:
key = key.replace('norm' , 'layer_norm' )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
idx = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )]
key = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(idx )-1}' )
if "layer_norm1" in key:
key = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
key = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
idx = key[key.find('block' ) + len('block' )]
key = key.replace(F'block{idx}' , F'block.{int(idx )-1}' )
if "attn.q" in key:
key = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
key = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
key = key.replace('attn' , 'attention.self' )
if "fc1" in key:
key = key.replace('fc1' , 'dense1' )
if "fc2" in key:
key = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
key = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
key = key.replace('linear_fuse.conv' , 'linear_fuse' )
key = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
idx = key[key.find('linear_c' ) + len('linear_c' )]
key = key.replace(F'linear_c{idx}' , F'linear_c.{int(idx )-1}' )
if key.startswith('head' ):
key = key.replace('head' , 'classifier' )
new_state_dict[key] = value
return new_state_dict
def read_in_k_v(state_dict , config ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
kv_weight = state_dict.pop(F'segformer.encoder.block.{i}.{j}.attention.self.kv.weight' )
kv_bias = state_dict.pop(F'segformer.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
state_dict[F'segformer.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[
: config.hidden_sizes[i], :
]
state_dict[F'segformer.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[: config.hidden_sizes[i]]
state_dict[F'segformer.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[
config.hidden_sizes[i] :, :
]
state_dict[F'segformer.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[
config.hidden_sizes[i] :
]
def prepare_img():
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url , stream=True ).raw )
return image
@torch.no_grad()
def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : Tuple ):
A = SegformerConfig()
A = False
# set attributes based on model_name
A = 'huggingface/label-files'
if "segformer" in model_name:
A = model_name[len('segformer.' ) : len('segformer.' ) + 2]
if "ade" in model_name:
A = 150
A = 'ade20k-id2label.json'
A = (1, 150, 128, 128)
elif "city" in model_name:
A = 19
A = 'cityscapes-id2label.json'
A = (1, 19, 128, 128)
else:
raise ValueError(F'Model {model_name} not supported' )
elif "mit" in model_name:
A = True
A = model_name[4:6]
A = 1000
A = 'imagenet-1k-id2label.json'
A = (1, 1000)
else:
raise ValueError(F'Model {model_name} not supported' )
# set config attributes
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
A = {int(snake_case__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
A = [64, 128, 320, 512]
A = 256
elif size == "b2":
A = [64, 128, 320, 512]
A = 768
A = [3, 4, 6, 3]
elif size == "b3":
A = [64, 128, 320, 512]
A = 768
A = [3, 4, 18, 3]
elif size == "b4":
A = [64, 128, 320, 512]
A = 768
A = [3, 8, 27, 3]
elif size == "b5":
A = [64, 128, 320, 512]
A = 768
A = [3, 6, 40, 3]
else:
raise ValueError(F'Size {size} not supported' )
# load image processor (only resize + normalize)
A = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=snake_case__ , align=snake_case__ , do_random_crop=snake_case__ )
# prepare image
A = prepare_img()
A = image_processor(images=snake_case__ , return_tensors='pt' ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
if encoder_only:
A = torch.load(snake_case__ , map_location=torch.device('cpu' ) )
else:
A = torch.load(snake_case__ , map_location=torch.device('cpu' ) )['state_dict']
# rename keys
A = rename_keys(snake_case__ , encoder_only=snake_case__ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(snake_case__ , snake_case__ )
# create HuggingFace model and load state dict
if encoder_only:
A = False
A = SegformerForImageClassification(snake_case__ )
else:
A = SegformerForSemanticSegmentation(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# forward pass
A = model(snake_case__ )
A = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
A = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
A = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
A = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
A = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
A = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
A = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
A = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
A = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
A = torch.tensor(
[
[
[-1.13_72e01, -1.27_87e01, -1.34_77e01],
[-1.25_36e01, -1.41_94e01, -1.44_09e01],
[-1.32_17e01, -1.48_88e01, -1.53_27e01],
],
[
[-1.47_91e01, -1.71_22e01, -1.82_77e01],
[-1.71_63e01, -1.91_92e01, -1.95_33e01],
[-1.78_97e01, -1.99_91e01, -2.03_15e01],
],
[
[7.67_23e-01, 4.19_21e-01, -7.78_78e-02],
[4.77_72e-01, 9.55_57e-03, -2.80_82e-01],
[3.60_32e-01, -2.48_26e-01, -5.11_68e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
A = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
A = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
A = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
A = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
A = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
A = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
predicted_class_idx = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
model.save_pretrained(snake_case__ )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path) | 91 |
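# Example invocation (added; the script filename and both paths are placeholders):
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer.b0.pth \
#       --pytorch_dump_folder_path ./segformer-b0-hf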
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: str = AudioLDMPipeline
_lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_PARAMS
_lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_BATCH_PARAMS
_lowerCamelCase: Optional[int] = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=(32, 64) ,class_embed_type='simple_projection' ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=A_ ,)
A = DDIMScheduler(
beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=A_ ,set_alpha_to_one=A_ ,)
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
torch.manual_seed(0 )
A = ClapTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,projection_dim=32 ,)
A = ClapTextModelWithProjection(A_ )
A = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' ,model_max_length=77 )
A = SpeechTaHifiGanConfig(
model_in_dim=8 ,sampling_rate=1_6000 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=A_ ,)
A = SpeechTaHifiGan(A_ )
A = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Dict=0 ) -> str:
if str(A_ ).startswith('mps' ):
A = torch.manual_seed(A_ )
else:
A = torch.Generator(device=A_ ).manual_seed(A_ )
A = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = AudioLDMPipeline(**A_ )
A = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs(A_ )
A = audioldm_pipe(**A_ )
A = output.audios[0]
assert audio.ndim == 1
assert len(A_ ) == 256
A = audio[:10]
A = np.array(
[-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
A = self.get_dummy_components()
A = AudioLDMPipeline(**A_ )
A = audioldm_pipe.to(A_ )
A = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs(A_ )
A = 3 * [inputs['prompt']]
# forward
A = audioldm_pipe(**A_ )
A = output.audios[0]
A = self.get_dummy_inputs(A_ )
A = 3 * [inputs.pop('prompt' )]
A = audioldm_pipe.tokenizer(
A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,)
A = text_inputs['input_ids'].to(A_ )
A = audioldm_pipe.text_encoder(
A_ ,)
A = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
A = F.normalize(A_ ,dim=-1 )
A = prompt_embeds
# forward
A = audioldm_pipe(**A_ )
A = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
A = self.get_dummy_components()
A = AudioLDMPipeline(**A_ )
A = audioldm_pipe.to(A_ )
A = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs(A_ )
A = 3 * ['this is a negative prompt']
A = negative_prompt
A = 3 * [inputs['prompt']]
# forward
A = audioldm_pipe(**A_ )
A = output.audios[0]
A = self.get_dummy_inputs(A_ )
A = 3 * [inputs.pop('prompt' )]
A = []
for p in [prompt, negative_prompt]:
A = audioldm_pipe.tokenizer(
A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,)
A = text_inputs['input_ids'].to(A_ )
A = audioldm_pipe.text_encoder(
A_ ,)
A = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
A = F.normalize(A_ ,dim=-1 )
embeds.append(A_ )
A , A = embeds
# forward
A = audioldm_pipe(**A_ )
A = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = PNDMScheduler(skip_prk_steps=A_ )
A = AudioLDMPipeline(**A_ )
A = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs(A_ )
A = 'egg cracking'
A = audioldm_pipe(**A_ ,negative_prompt=A_ )
A = output.audios[0]
assert audio.ndim == 1
assert len(A_ ) == 256
A = audio[:10]
A = np.array(
[-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = PNDMScheduler(skip_prk_steps=A_ )
A = AudioLDMPipeline(**A_ )
A = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
A = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
A = audioldm_pipe(A_ ,num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
A = 2
A = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
A = 2
A = audioldm_pipe(A_ ,num_inference_steps=2 ,num_waveforms_per_prompt=A_ ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
A = 2
A = audioldm_pipe(
[prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=A_ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = AudioLDMPipeline(**A_ )
A = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
A = audioldm_pipe.vocoder.config.sampling_rate
A = self.get_dummy_inputs(A_ )
A = audioldm_pipe(audio_length_in_s=0.0_16 ,**A_ )
A = output.audios[0]
assert audio.ndim == 1
assert len(A_ ) / vocoder_sampling_rate == 0.0_16
A = audioldm_pipe(audio_length_in_s=0.0_32 ,**A_ )
A = output.audios[0]
assert audio.ndim == 1
assert len(A_ ) / vocoder_sampling_rate == 0.0_32
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
A = self.get_dummy_components()
A = AudioLDMPipeline(**A_ )
A = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
A = ['hey']
A = audioldm_pipe(A_ ,num_inference_steps=1 )
A = output.audios.shape
assert audio_shape == (1, 256)
A = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
A = SpeechTaHifiGan(A_ ).to(A_ )
A = audioldm_pipe(A_ ,num_inference_steps=1 )
A = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
self._test_inference_batch_single_identical(test_mean_pixel_difference=A_ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_ )
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[Any] ,A_ : str="cpu" ,A_ : List[str]=torch.floataa ,A_ : str=0 ) -> List[Any]:
A = torch.Generator(device=A_ ).manual_seed(A_ )
A = np.random.RandomState(A_ ).standard_normal((1, 8, 128, 16) )
A = torch.from_numpy(A_ ).to(device=A_ ,dtype=A_ )
A = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
A = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
A = self.get_inputs(A_ )
A = 25
A = audioldm_pipe(**A_ ).audios[0]
assert audio.ndim == 1
assert len(A_ ) == 8_1920
A = audio[7_7230:7_7240]
A = np.array(
[-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] )
A = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
A = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
A = audioldm_pipe.to(A_ )
audioldm_pipe.set_progress_bar_config(disable=A_ )
A = self.get_inputs(A_ )
A = audioldm_pipe(**A_ ).audios[0]
assert audio.ndim == 1
assert len(A_ ) == 8_1920
A = audio[2_7780:2_7790]
A = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12] )
A = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2 | 91 | 1 |
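# --- Minimal inference sketch (added), mirroring the slow tests above ---
# Uses the same "cvssp/audioldm" checkpoint the tests load; the step count is
# an arbitrary example.
def _example_audioldm_inference():
    """Sketch only: text-to-audio with the AudioLDM pipeline."""
    from diffusers import AudioLDMPipeline

    pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
    audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10).audios[0]
    return audio  # 1-D numpy waveform at pipe.vocoder.config.sampling_rate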
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key ):
regex = R'''\w+[.]\d+'''
pats = re.findall(regex , key )
for pat in pats:
key = key.replace(pat , '''_'''.join(pat.split('''.''' ) ) )
return key
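# Worked example (added) of the regex rename above:
#     import re
#     key = "blocks.0.weight"
#     for pat in re.findall(r"\w+[.]\d+", key):
#         key = key.replace(pat, "_".join(pat.split(".")))
#     assert key == "blocks_0.weight"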
def rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict ):
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
pt_tensor = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
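# Layout note (added): PyTorch conv kernels are (out_ch, in_ch, kH, kW) while
# Flax expects (kH, kW, in_ch, out_ch), hence the transpose(2, 3, 1, 0) above.
#     import numpy as np
#     pt = np.zeros((8, 3, 5, 5))    # (O, I, H, W)
#     assert pt.transpose(2, 3, 1, 0).shape == (5, 5, 3, 8)  # (H, W, I, O)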
def convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model , init_key=42 ):
# Step 1: Convert pytorch tensor to numpy
pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )
random_flax_state_dict = flatten_dict(random_flax_params )
flax_state_dict = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
renamed_pt_key = rename_key(pt_key )
pt_tuple_key = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
flax_key , flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
return unflatten_dict(flax_state_dict )
| 332 |
def counting_sort(collection ):
# if the collection is empty, returns empty
if collection == []:
return []
# get some information about the collection
coll_len = len(collection )
coll_max = max(collection )
coll_min = min(collection )
# create the counting array
counting_arr_length = coll_max + 1 - coll_min
counting_arr = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with its predecessors. now, counting_arr[i] tells
# us how many elements <= i are in the collection
for i in range(1 , counting_arr_length ):
counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
# create the output collection
ordered = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to beginning, updating counting_arr
for i in reversed(range(0 , coll_len ) ):
ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
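# Worked example (added): counting_sort([4, 1, 3, 1])
#   counts (min=1): [2, 0, 1, 1] -> prefix sums: [2, 2, 3, 4]
#   placing from the end gives [1, 1, 3, 4]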
def counting_sort_string(string ):
return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
user_input = input('Enter numbers separated by a comma:\n').strip()
unsorted = [int(item) for item in user_input.split(',')]
print(counting_sort(unsorted))
| 332 | 1 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any=13 , UpperCAmelCase__ : List[Any]=32 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Dict=[10, 20, 30, 40] , UpperCAmelCase__ : Union[str, Any]=[2, 2, 3, 2] , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Union[str, Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Any=10 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : List[str]=["stage2", "stage3", "stage4"] , UpperCAmelCase__ : Tuple=[2, 3, 4] , UpperCAmelCase__ : str=None , ) ->Optional[Any]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = image_size
A__ = num_channels
A__ = num_stages
A__ = hidden_sizes
A__ = depths
A__ = is_training
A__ = use_labels
A__ = intermediate_size
A__ = hidden_act
A__ = num_labels
A__ = initializer_range
A__ = out_features
A__ = out_indices
A__ = scope
def SCREAMING_SNAKE_CASE ( self : str) ->Tuple:
'''simple docstring'''
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.num_labels)
A__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Any:
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : str) ->List[str]:
'''simple docstring'''
A__ = ConvNextVaModel(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
A__ = model(UpperCAmelCase__)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int) ->Any:
'''simple docstring'''
A__ = ConvNextVaForImageClassification(UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
A__ = model(UpperCAmelCase__ , labels=UpperCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str) ->str:
'''simple docstring'''
A__ = ConvNextVaBackbone(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
A__ = model(UpperCAmelCase__)
# verify hidden states
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
A__ = None
A__ = ConvNextVaBackbone(config=UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
A__ = model(UpperCAmelCase__)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'''pixel_values''': pixel_values, '''labels''': labels}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ = (
{'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]:
'''simple docstring'''
A__ = ConvNextVaModelTester(self)
A__ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self : str) ->Tuple:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE ( self : int) ->Any:
'''simple docstring'''
return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''')
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''')
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any:
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''')
def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
A__ , A__ = self.model_tester.prepare_config_and_inputs_with_labels()
A__ = True
if model_class.__name__ in [
*get_values(UpperCAmelCase__),
*get_values(UpperCAmelCase__),
]:
continue
A__ = model_class(UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.train()
A__ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__)
A__ = model(**UpperCAmelCase__).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : int) ->List[str]:
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
A__ , A__ = self.model_tester.prepare_config_and_inputs_with_labels()
A__ = False
A__ = True
if (
model_class.__name__
in [*get_values(UpperCAmelCase__), *get_values(UpperCAmelCase__)]
or not model_class.supports_gradient_checkpointing
):
continue
A__ = model_class(UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.gradient_checkpointing_enable()
model.train()
A__ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__)
A__ = model(**UpperCAmelCase__).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : Any) ->int:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCAmelCase__)
A__ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any]):
A__ = model_class(UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__))
A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase__) , expected_num_stages + 1)
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]:
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = ConvNextVaModel.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
def prepare_img():
"""simple docstring"""
image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int:
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
'''simple docstring'''
A__ = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''').to(UpperCAmelCase__)
A__ = self.default_image_processor
A__ = prepare_img()
A__ = preprocessor(images=UpperCAmelCase__ , return_tensors='''pt''').to(UpperCAmelCase__)
# forward pass
with torch.no_grad():
A__ = model(**UpperCAmelCase__)
# verify the logits
A__ = torch.Size((1, 1_000))
self.assertEqual(outputs.logits.shape , UpperCAmelCase__)
A__ = torch.tensor([0.9996, 0.1966, -0.4386]).to(UpperCAmelCase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4))
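# --- Minimal classification sketch (added), using the same checkpoint as the
# integration test above; the image path is a placeholder.
def _example_convnextv2_classify(image_path="cat.png"):
    """Sketch only: single-image inference with ConvNeXt V2."""
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

    ckpt = "facebook/convnextv2-tiny-1k-224"
    processor = AutoImageProcessor.from_pretrained(ckpt)
    model = ConvNextV2ForImageClassification.from_pretrained(ckpt)
    inputs = processor(images=Image.open(image_path), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]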
| 87 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
_a = StableUnCLIPImgaImgPipeline
_a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_a = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_a = frozenset([] )
def UpperCamelCase__ ( self ) -> List[str]:
embedder_hidden_size = 32
embedder_projection_dim = embedder_hidden_size
# image encoding components
__a = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
__a = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=UpperCamelCase , projection_dim=UpperCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
__a = StableUnCLIPImageNormalizer(embedding_dim=UpperCamelCase )
__a = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
__a = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
__a = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
__a = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCamelCase , layers_per_block=1 , upcast_attention=UpperCamelCase , use_linear_projection=UpperCamelCase , )
torch.manual_seed(0 )
__a = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=UpperCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
__a = AutoencoderKL()
__a = {
# image encoding components
'feature_extractor': feature_extractor,
'image_encoder': image_encoder.eval(),
# image noising components
'image_normalizer': image_normalizer.eval(),
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder.eval(),
'unet': unet.eval(),
'scheduler': scheduler,
'vae': vae.eval(),
}
return components
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase=0 , UpperCamelCase=True ) -> Dict:
if str(UpperCamelCase ).startswith('mps' ):
__a = torch.manual_seed(UpperCamelCase )
else:
__a = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
__a = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
if pil_image:
__a = input_image * 0.5 + 0.5
__a = input_image.clamp(0 , 1 )
__a = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__a = DiffusionPipeline.numpy_to_pil(UpperCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def UpperCamelCase__ ( self ) -> int:
__a = 'cpu' # ensure determinism for the device-dependent torch.Generator
__a = self.get_dummy_components()
__a = StableUnCLIPImgaImgPipeline(**UpperCamelCase )
__a = sd_pipe.to(UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase )
__a = self.get_dummy_inputs(UpperCamelCase )
inputs.update({'image_embeds': None} )
__a = sd_pipe(**UpperCamelCase ).images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a = np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCamelCase__ ( self ) -> Any:
__a = torch_device in ['cpu', 'mps']
self._test_attention_slicing_forward_pass(test_max_difference=UpperCamelCase )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
__a = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=UpperCamelCase )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ) -> Optional[Any]:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=UpperCamelCase )
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCamelCase__ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> str:
__a = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
__a = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' )
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.float16 )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device='cpu' ).manual_seed(0 )
__a = pipe(UpperCamelCase , 'anime turtle' , generator=UpperCamelCase , output_type='np' )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ ( self ) -> Optional[int]:
__a = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
__a = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' )
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.float16 )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = torch.Generator(device='cpu' ).manual_seed(0 )
__a = pipe(UpperCamelCase , 'anime turtle' , generator=UpperCamelCase , output_type='np' )
__a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
__a = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__a = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.float16 )
__a = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__a = pipe(
UpperCamelCase , 'anime turtle' , num_inference_steps=2 , output_type='np' , )
__a = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
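# --- Minimal img2img sketch (added), mirroring the slow tests above; the
# checkpoint name comes from those tests and `init_image` is a placeholder.
def _example_stable_unclip_img2img(init_image):
    """Sketch only: image variation with Stable unCLIP."""
    import torch
    from diffusers import StableUnCLIPImg2ImgPipeline

    pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
        "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16)
    pipe.enable_sequential_cpu_offload()  # memory saving, as in the tests
    return pipe(init_image, "anime turtle").images[0]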
| 539 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_lowerCAmelCase : List[str] = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 604 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case :
def __init__( self ,a_ ,a_=13 ,a_=32 ,a_=2 ,a_=3 ,a_=16 ,a_=[1, 2, 1] ,a_=[2, 2, 4] ,a_=2 ,a_=2.0 ,a_=True ,a_=0.0 ,a_=0.0 ,a_=0.1 ,a_="gelu" ,a_=False ,a_=True ,a_=0.02 ,a_=1e-5 ,a_=True ,a_=None ,a_=True ,a_=10 ,a_=8 ,):
"""simple docstring"""
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = embed_dim
lowerCAmelCase__ = depths
lowerCAmelCase__ = num_heads
lowerCAmelCase__ = window_size
lowerCAmelCase__ = mlp_ratio
lowerCAmelCase__ = qkv_bias
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = drop_path_rate
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = use_absolute_embeddings
lowerCAmelCase__ = patch_norm
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = is_training
lowerCAmelCase__ = scope
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = encoder_stride
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = SwinvaModel(config=a_ )
model.to(a_ )
model.eval()
lowerCAmelCase__ = model(a_ )
lowerCAmelCase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCAmelCase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
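# Worked numbers (added) for the defaults above (image_size=32, patch_size=2,
# depths of length 3): input tokens = (32 // 2) ** 2 = 256, two merging stages
# divide by 4 ** 2 = 16 -> expected_seq_len = 16; expected_dim = 16 * 2 ** 2 = 64.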
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = SwinvaForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
lowerCAmelCase__ = model(a_ )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = SwinvaForMaskedImageModeling(a_ )
model.to(a_ )
model.eval()
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(a_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = self.type_sequence_label_size
lowerCAmelCase__ = SwinvaForImageClassification(a_ )
model.to(a_ )
model.eval()
lowerCAmelCase__ = model(a_ ,labels=a_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE__ = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = SwinvaModelTester(self )
lowerCAmelCase__ = ConfigTester(self ,config_class=a_ ,embed_dim=37 )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowerCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ ,nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(a_ )
lowerCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] ,a_ )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = True
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(a_ ,a_ ) )
lowerCAmelCase__ = outputs.attentions
lowerCAmelCase__ = len(self.model_tester.depths )
self.assertEqual(len(a_ ) ,a_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase__ = True
lowerCAmelCase__ = config.window_size**2
lowerCAmelCase__ = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(a_ ,a_ ) )
lowerCAmelCase__ = outputs.attentions
self.assertEqual(len(a_ ) ,a_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
lowerCAmelCase__ = len(a_ )
# Check attention is always last and order is fine
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(a_ ,a_ ) )
if hasattr(self.model_tester ,'num_hidden_states_types' ):
lowerCAmelCase__ = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowerCAmelCase__ = 2
self.assertEqual(out_len + added_hidden_states ,len(a_ ) )
lowerCAmelCase__ = outputs.attentions
self.assertEqual(len(a_ ) ,a_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ,a_ ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(a_ ,a_ ) )
lowerCAmelCase__ = outputs.hidden_states
lowerCAmelCase__ = getattr(
self.model_tester ,'expected_num_hidden_layers' ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a_ ) ,a_ )
# Swinv2 has a different seq_length
lowerCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
lowerCAmelCase__ = outputs.reshaped_hidden_states
self.assertEqual(len(a_ ) ,a_ )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = reshaped_hidden_states[0].shape
lowerCAmelCase__ = (
reshaped_hidden_states[0].view(a_ ,a_ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
self.check_hidden_states_output(a_ ,a_ ,a_ ,a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
self.check_hidden_states_output(a_ ,a_ ,a_ ,a_ )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = 3
lowerCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
self.check_hidden_states_output(a_ ,a_ ,a_ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
self.check_hidden_states_output(a_ ,a_ ,a_ ,(padded_height, padded_width) )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a_ )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = SwinvaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = _config_zero_init(a_ )
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(config=a_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'Parameter {name} of model {model_class} seems not properly initialized' ,)
@require_vision
@require_torch
class __snake_case ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
a_ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCAmelCase__ = image_processor(images=a_ ,return_tensors='pt' ).to(a_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**a_ )
# verify the logits
lowerCAmelCase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,a_ )
lowerCAmelCase__ = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,a_ ,atol=1e-4 ) )
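# A quick sanity check of the shape arithmetic used in the model test above. The numbers
# below are hypothetical tester values (image_size=32, patch_size=2, four stages), not
# taken from this file; they only illustrate the two formulas:
#   expected_seq_len = ((32 // 2) ** 2) // (4 ** (4 - 1)) = 256 // 64 = 4
#   expected_dim     = embed_dim * 2 ** (4 - 1)           = 8 * embed_dim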
| 604 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCAmelCase : Optional[Any] = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, lowerCAmelCase)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def _lowercase ( __UpperCamelCase : List[str] ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__UpperCamelCase )
def _lowercase ( __UpperCamelCase : Union[str, Any] ):
from transformers.testing_utils import pytest_terminal_summary_main
    snake_case__ = __UpperCamelCase.config.getoption("""--make-reports""" )
    if snake_case__:
        pytest_terminal_summary_main(__UpperCamelCase , id=snake_case__ )
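# Note: pytest discovers conftest.py hooks strictly by name; upstream these two wrappers
# are defined as `pytest_addoption(parser)` and `pytest_terminal_summary(terminalreporter)`,
# so the renamed `_lowercase` definitions above would not be picked up by pytest itself.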
| 214 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCAmelCase : Tuple ={
"""configuration_mobilenet_v2""": [
"""MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileNetV2Config""",
"""MobileNetV2OnnxConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple =["""MobileNetV2FeatureExtractor"""]
_lowerCAmelCase : Optional[int] =["""MobileNetV2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _lowerCAmelCase["""modeling_mobilenet_v2"""] = [
"""MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileNetV2ForImageClassification""",
"""MobileNetV2ForSemanticSegmentation""",
"""MobileNetV2Model""",
"""MobileNetV2PreTrainedModel""",
"""load_tf_weights_in_mobilenet_v2""",
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
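    # Replacing this module in sys.modules with a _LazyModule defers the heavy imports
    # (torch, the vision stack): importing the package only records the names registered
    # above, and the real submodule is loaded the first time an attribute is accessed.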
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _lowerCAmelCase, module_spec=__spec__)
| 113 | 0 |
'''simple docstring'''
def remove_duplicates( key : str ):
    key_no_dups = ''''''
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups
def create_cipher_map( key : str ):
    alphabet = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key ) , 26 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher( message : str , cipher_map : dict[str, str] ):
    return "".join(cipher_map.get(ch , ch ) for ch in message.upper() )
def decipher( message : str , cipher_map : dict[str, str] ):
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )
def main():
    message = input('''Enter message to encode or decode: ''' ).strip()
    key = input('''Enter keyword: ''' ).strip()
    option = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
    try:
        func = {'''e''': encipher, '''d''': decipher}[option]
    except KeyError:
        raise KeyError('''invalid input option''' )
    cipher_map = create_cipher_map(key )
    print(func(message , cipher_map ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
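# A minimal usage sketch (key and message are arbitrary examples; outputs follow from
# the mapping built above):
#   cipher_map = create_cipher_map('Goodbye!!' )   # duplicates/symbols dropped -> key 'GODBYE'
#   encipher('Hello World!!' , cipher_map )        # -> 'CYJJM VMQJB!!'
#   decipher('CYJJM VMQJB!!' , cipher_map )        # -> 'HELLO WORLD!!'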
| 555 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def A ( A_ : str , A_ : str ):
if "xprophetnet" in prophetnet_checkpoint_path:
snake_case : Any = XLMProphetNetForConditionalGenerationOld.from_pretrained(A_ )
snake_case, snake_case : Tuple = XLMProphetNetForConditionalGeneration.from_pretrained(
A_ , output_loading_info=A_ )
else:
snake_case : Union[str, Any] = ProphetNetForConditionalGenerationOld.from_pretrained(A_ )
snake_case, snake_case : List[Any] = ProphetNetForConditionalGeneration.from_pretrained(
A_ , output_loading_info=A_ )
snake_case : Union[str, Any] = ['''key_proj''', '''value_proj''', '''query_proj''']
snake_case : str = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
snake_case : Optional[Any] = key.split('''.''' )
if attributes[0] == "lm_head":
snake_case : Optional[int] = prophet
snake_case : Union[str, Any] = prophet_old
else:
snake_case : Optional[int] = prophet.prophetnet
snake_case : Any = prophet_old.model
snake_case : Optional[Any] = False
for attribute in attributes:
if attribute in mapping:
snake_case : List[str] = mapping[attribute]
if not hasattr(A_ , A_ ) and len(A_ ) > 0:
snake_case : str = attribute
elif hasattr(A_ , A_ ):
snake_case : Any = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
snake_case : Optional[Any] = old_model.weight
logger.info(F"""{attribute} is initialized.""" )
snake_case : Tuple = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
snake_case : List[str] = old_model.bias
logger.info(F"""{attribute} is initialized""" )
snake_case : Tuple = True
break
elif attribute in special_keys and hasattr(A_ , '''in_proj_weight''' ):
snake_case : Union[str, Any] = old_model.in_proj_weight.shape[0] // 3
snake_case : Any = getattr(A_ , A_ )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
snake_case : Tuple = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
snake_case : List[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
snake_case : str = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
snake_case : Any = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
snake_case : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
snake_case : List[str] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
snake_case : Optional[Any] = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
snake_case : List[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
snake_case : Any = True
break
if attribute.isdigit():
snake_case : Optional[Any] = model[int(A_ )]
snake_case : List[str] = old_model[int(A_ )]
else:
snake_case : Optional[Any] = getattr(A_ , A_ )
if old_attribute == "":
snake_case : Union[str, Any] = old_model
else:
if not hasattr(A_ , A_ ):
raise ValueError(F"""{old_model} does not have {old_attribute}""" )
snake_case : Tuple = getattr(A_ , A_ )
if not is_key_init:
raise ValueError(F"""{key} was not correctly initialized!""" )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(A_ )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
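# Example invocation (the checkpoint paths below are placeholders, not shipped files):
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path ./prophetnet_old_checkpoint \
#       --pytorch_dump_folder_path ./prophetnet_converted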
| 555 | 1 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def __snake_case ( lowerCAmelCase_ ) -> str:
    SCREAMING_SNAKE_CASE__ = []
    for line in lowerCAmelCase_ :
        line = re.sub(r'''#.*''' , '''''' , line ) # remove comments
        if line:
            SCREAMING_SNAKE_CASE__.append(line )
    SCREAMING_SNAKE_CASE__ = '''\n'''.join(SCREAMING_SNAKE_CASE__ )
    # Make a hash from all this code
    SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__.encode('''utf-8''' )
    return sha256(SCREAMING_SNAKE_CASE__ ).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_A : List[str] = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 100 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class A_ ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
__snake_case = 1
@register_to_config
    def __init__( self , num_train_timesteps: int = 2000 , beta_min: float = 0.1 , beta_max: float = 20 , sampling_eps: float = 1e-3 ):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps( self , num_inference_steps: int , device: Union[str, torch.device] = None ):
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )
    def step_pred( self , score , t , x , generator=None ):
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
def __len__( self: Optional[int] ):
return self.config.num_train_timesteps
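# The `step_pred` update above is one Euler–Maruyama step of the reverse-time VP-SDE
#     dx = [ -1/2 * beta(t) * x - beta(t) * score(x, t) ] dt + sqrt(beta(t)) dw
# integrated backwards with dt = -1 / len(timesteps); `x_mean` is the pre-noise estimate
# that samplers typically return at the final step.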
| 669 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__magic_name__ : int = logging.get_logger(__name__)
__magic_name__ : Optional[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__magic_name__ : Union[str, Any] = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
__magic_name__ : int = {
'''gpt2''': 1024,
'''gpt2-medium''': 1024,
'''gpt2-large''': 1024,
'''gpt2-xl''': 1024,
'''distilgpt2''': 1024,
}
class A__ ( __snake_case ):
'''simple docstring'''
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["""input_ids""", """attention_mask"""]
snake_case__ = GPTaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token='<|endoftext|>' , bos_token='<|endoftext|>' , eos_token='<|endoftext|>' , add_prefix_space=False , **kwargs ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        self.add_bos_token = kwargs.pop('add_bos_token' , False )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            'to use it with pretokenized inputs.'
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            'to use it with pretokenized inputs.'
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation: 'Conversation' ) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
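# A minimal usage sketch (the class above is the GPT-2 fast tokenizer behind an
# obfuscated name; the token ids shown are illustrative):
#   tokenizer = A__.from_pretrained('gpt2' )
#   tokenizer('Hello world' )['input_ids']   # e.g. [15496, 995]
#   tokenizer.decode([15496, 995] )          # -> 'Hello world'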
| 410 |
import argparse
import os
import re
import packaging.version
__magic_name__ : Dict = '''examples/'''
__magic_name__ : List[str] = {
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__magic_name__ : Any = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
__magic_name__ : int = '''README.md'''
def lowercase__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) -> Union[str, Any]:
"""simple docstring"""
with open(_UpperCamelCase , 'r' , encoding='utf-8' , newline='\n') as f:
UpperCamelCase = f.read()
UpperCamelCase , UpperCamelCase = REPLACE_PATTERNS[pattern]
UpperCamelCase = replace.replace('VERSION' , _UpperCamelCase)
UpperCamelCase = re_pattern.sub(_UpperCamelCase , _UpperCamelCase)
with open(_UpperCamelCase , 'w' , encoding='utf-8' , newline='\n') as f:
f.write(_UpperCamelCase)
def lowercase__ ( _UpperCamelCase) -> Dict:
"""simple docstring"""
for folder, directories, fnames in os.walk(_UpperCamelCase):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects')
if "legacy" in directories:
directories.remove('legacy')
for fname in fnames:
if fname.endswith('.py'):
update_version_in_file(os.path.join(_UpperCamelCase , _UpperCamelCase) , _UpperCamelCase , pattern='examples')
def lowercase__ ( _UpperCamelCase , _UpperCamelCase=False) -> Any:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
if not patch:
update_version_in_examples(_UpperCamelCase)
def lowercase__ ( ) -> str:
"""simple docstring"""
UpperCamelCase = '🤗 Transformers currently provides the following architectures'
UpperCamelCase = '1. Want to contribute a new model?'
with open(_UpperCamelCase , 'r' , encoding='utf-8' , newline='\n') as f:
UpperCamelCase = f.readlines()
# Find the start of the list.
UpperCamelCase = 0
while not lines[start_index].startswith(_start_prompt):
start_index += 1
start_index += 1
UpperCamelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt):
if lines[index].startswith('1.'):
UpperCamelCase = lines[index].replace(
'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , )
index += 1
with open(_UpperCamelCase , 'w' , encoding='utf-8' , newline='\n') as f:
f.writelines(_UpperCamelCase)
def lowercase__ ( ) -> str:
"""simple docstring"""
with open(REPLACE_FILES['init'] , 'r') as f:
UpperCamelCase = f.read()
UpperCamelCase = REPLACE_PATTERNS['init'][0].search(_UpperCamelCase).groups()[0]
return packaging.version.parse(_UpperCamelCase)
def lowercase__ ( _UpperCamelCase=False) -> str:
"""simple docstring"""
UpperCamelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!')
if default_version.is_devrelease:
UpperCamelCase = default_version.base_version
elif patch:
UpperCamelCase = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
UpperCamelCase = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
UpperCamelCase = input(F'Which version are you releasing? [{default_version}]')
if len(_UpperCamelCase) == 0:
UpperCamelCase = default_version
print(F'Updating version to {version}.')
global_version_update(_UpperCamelCase , patch=_UpperCamelCase)
if not patch:
print('Cleaning main README, don\'t forget to run `make fix-copies`.')
clean_main_ref_in_model_list()
def lowercase__ ( ) -> int:
"""simple docstring"""
UpperCamelCase = get_version()
UpperCamelCase = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
UpperCamelCase = current_version.base_version
# Check with the user we got that right.
UpperCamelCase = input(F'Which version are we developing now? [{dev_version}]')
if len(_UpperCamelCase) == 0:
UpperCamelCase = dev_version
print(F'Updating version to {version}.')
global_version_update(_UpperCamelCase)
print('Cleaning main README, don\'t forget to run `make fix-copies`.')
clean_main_ref_in_model_list()
if __name__ == "__main__":
__magic_name__ : List[str] = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__magic_name__ : Optional[Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
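# Typical invocations, matching the argparse flags above (the utils/release.py path is
# the upstream location of this script and may differ here):
#   python utils/release.py                  # pre-release: bump files to the release version
#   python utils/release.py --patch          # pre-release: bump to a patch version instead
#   python utils/release.py --post_release   # post-release: move to the next .dev0 version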
| 410 | 1 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
lowercase_ = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
_a = np.argmax(_lowercase , axis=1 )
return np.sum(outputs == labels )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Union[str, Any]:
with open(_lowercase , encoding='utf_8' ) as f:
_a = csv.reader(_lowercase )
_a = []
next(_lowercase ) # skip the first line
for line in tqdm(_lowercase ):
output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int:
_a = []
for dataset in encoded_datasets:
_a = len(_lowercase )
_a = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
_a = np.zeros((n_batch, 2) , dtype=np.intaa )
_a = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
_a = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(_lowercase ):
_a = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
_a = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
_a = with_conta
_a = with_conta
_a = len(_lowercase ) - 1
_a = len(_lowercase ) - 1
_a = with_conta
_a = with_conta
_a = mc_label
_a = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(_lowercase ) for t in all_inputs ) )
return tensor_datasets
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
_a = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=_lowercase , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=_lowercase , type=_lowercase , required=_lowercase , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=_lowercase , default='' )
parser.add_argument('--eval_dataset' , type=_lowercase , default='' )
parser.add_argument('--seed' , type=_lowercase , default=42 )
parser.add_argument('--num_train_epochs' , type=_lowercase , default=3 )
parser.add_argument('--train_batch_size' , type=_lowercase , default=8 )
parser.add_argument('--eval_batch_size' , type=_lowercase , default=16 )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=_lowercase , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=_lowercase , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=_lowercase , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=_lowercase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=_lowercase , default=6.25e-5 )
parser.add_argument('--warmup_steps' , default=0 , type=_lowercase , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=_lowercase , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=_lowercase , default=0.01 )
parser.add_argument('--lm_coef' , type=_lowercase , default=0.9 )
parser.add_argument('--n_valid' , type=_lowercase , default=374 )
parser.add_argument('--server_ip' , type=_lowercase , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=_lowercase , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()
    print(args )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_lowercase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
_a = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
_a = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(_lowercase , _lowercase ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
_a = ['_start_', '_delimiter_', '_classify_']
_a = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_lowercase )
_a = tokenizer.convert_tokens_to_ids(_lowercase )
_a = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_lowercase ) )
model.to(_lowercase )
# Load and encode the datasets
def tokenize_and_encode(_UpperCAmelCase ):
if isinstance(_lowercase , _lowercase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_lowercase ) )
elif isinstance(_lowercase , _lowercase ):
return obj
return [tokenize_and_encode(_lowercase ) for o in obj]
logger.info('Encoding dataset...' )
_a = load_rocstories_dataset(args.train_dataset )
_a = load_rocstories_dataset(args.eval_dataset )
_a = (train_dataset, eval_dataset)
_a = tokenize_and_encode(_lowercase )
# Compute the max input length for the Transformer
_a = model.config.n_positions // 2 - 2
_a = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
_a = min(_lowercase , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
_a = pre_process_datasets(_lowercase , _lowercase , _lowercase , *_lowercase )
_a , _a = tensor_datasets[0], tensor_datasets[1]
_a = TensorDataset(*_lowercase )
_a = RandomSampler(_lowercase )
_a = DataLoader(_lowercase , sampler=_lowercase , batch_size=args.train_batch_size )
_a = TensorDataset(*_lowercase )
_a = SequentialSampler(_lowercase )
_a = DataLoader(_lowercase , sampler=_lowercase , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
_a = args.max_steps
_a = args.max_steps // (len(_lowercase ) // args.gradient_accumulation_steps) + 1
else:
_a = len(_lowercase ) // args.gradient_accumulation_steps * args.num_train_epochs
_a = list(model.named_parameters() )
_a = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
_a = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
_a = AdamW(_lowercase , lr=args.learning_rate , eps=args.adam_epsilon )
_a = get_linear_schedule_with_warmup(
_lowercase , num_warmup_steps=args.warmup_steps , num_training_steps=_lowercase )
if args.do_train:
_a , _a , _a = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
_a = 0
_a = 0
_a = tqdm(_lowercase , desc='Training' )
for step, batch in enumerate(_lowercase ):
_a = tuple(t.to(_lowercase ) for t in batch )
_a , _a , _a , _a = batch
_a = model(_lowercase , mc_token_ids=_lowercase , lm_labels=_lowercase , mc_labels=_lowercase )
_a = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
_a = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
_a = 'Training loss: {:.2e} lr: {:.2e}'.format(_lowercase , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
_a = model.module if hasattr(_lowercase , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
_a = os.path.join(args.output_dir , _lowercase )
_a = os.path.join(args.output_dir , _lowercase )
torch.save(model_to_save.state_dict() , _lowercase )
model_to_save.config.to_json_file(_lowercase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
_a = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
_a = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_lowercase )
if args.do_eval:
model.eval()
_a , _a = 0, 0
_a , _a = 0, 0
for batch in tqdm(_lowercase , desc='Evaluating' ):
_a = tuple(t.to(_lowercase ) for t in batch )
_a , _a , _a , _a = batch
with torch.no_grad():
_a , _a , _a , _a = model(
_lowercase , mc_token_ids=_lowercase , lm_labels=_lowercase , mc_labels=_lowercase )
_a = mc_logits.detach().cpu().numpy()
_a = mc_labels.to('cpu' ).numpy()
_a = accuracy(_lowercase , _lowercase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
_a = eval_loss / nb_eval_steps
_a = eval_accuracy / nb_eval_examples
_a = tr_loss / nb_tr_steps if args.do_train else None
_a = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
_a = os.path.join(args.output_dir , 'eval_results.txt' )
with open(_lowercase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , _lowercase , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
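# Example fine-tuning run (the dataset paths are placeholders for the ROCStories CSVs;
# the script filename follows the upstream example and may differ here):
#   python run_openai_gpt.py --model_name openai-gpt --do_train --do_eval \
#       --train_dataset "$ROC_STORIES_DIR/cloze_test_val__spring2016.csv" \
#       --eval_dataset "$ROC_STORIES_DIR/cloze_test_test__spring2016.csv" \
#       --output_dir ../log --train_batch_size 16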
| 562 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCamelCase__ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
"""simple docstring"""
@register_to_config
    def __init__( self , max_length: int , vocab_size: int , d_model: int , dropout_rate: float , num_layers: int , num_heads: int , d_kv: int , d_ff: int , feed_forward_proj: str , is_decoder: bool = False , ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size , d_model )
        self.position_encoding = nn.Embedding(max_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate )
        t5config = TaConfig(
            vocab_size=vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = TaBlock(t5config )
            self.encoders.append(lyr )
        self.layer_norm = TaLayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )
    def forward( self , encoder_input_tokens , encoder_inputs_mask ):
        x = self.token_embedder(encoder_input_tokens )
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length , device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )
        x = self.dropout_pre(x )
        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask , input_shape )
        for lyr in self.encoders:
            x = lyr(x , extended_attention_mask )[0]
        x = self.layer_norm(x )
        return self.dropout_post(x ), encoder_inputs_mask
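# Shape sketch for the forward pass above (B = batch, L = sequence length, D = d_model):
#   encoder_input_tokens: (B, L) int64  ->  token + position embeddings x: (B, L, D)
#   encoder_inputs_mask:  (B, L)        ->  extended attention mask: (B, 1, 1, L)
# The method returns the post-dropout hidden states together with the original mask.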
| 331 | 0 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
lowerCamelCase__ = logging.getLogger(__name__)
class __magic_name__ (__lowercase ):
lowerCamelCase__ = '''summarization'''
lowerCamelCase__ = ['''loss''']
lowerCamelCase__ = ROUGE_KEYS
lowerCamelCase__ = '''rouge2'''
def __init__( self , _a , **_a ) -> Optional[Any]:
if hparams.sortish_sampler and hparams.gpus > 1:
lowerCAmelCase_ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training" )
if hparams.sortish_sampler:
raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously" )
super().__init__(_a , num_labels=_a , mode=self.mode , **_a )
use_task_specific_params(self.model , "summarization" )
save_git_info(self.hparams.output_dir )
lowerCAmelCase_ = Path(self.output_dir ) / "metrics.json"
lowerCAmelCase_ = Path(self.output_dir ) / "hparams.pkl"
pickle_save(self.hparams , self.hparams_save_path )
lowerCAmelCase_ = 0
lowerCAmelCase_ = defaultdict(_a )
lowerCAmelCase_ = self.config.model_type
lowerCAmelCase_ = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
lowerCAmelCase_ = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
lowerCAmelCase_ = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
lowerCAmelCase_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
lowerCAmelCase_ = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
lowerCAmelCase_ = get_git_info()["repo_sha"]
lowerCAmelCase_ = hparams.num_workers
lowerCAmelCase_ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _a ):
lowerCAmelCase_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
lowerCAmelCase_ = self.decoder_start_token_id
lowerCAmelCase_ = (
SeqaSeqDataset if hasattr(self.tokenizer , "prepare_seq2seq_batch" ) else LegacySeqaSeqDataset
)
lowerCAmelCase_ = False
lowerCAmelCase_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
lowerCAmelCase_ = self.hparams.eval_max_gen_length
else:
lowerCAmelCase_ = self.model.config.max_length
lowerCAmelCase_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def __a ( self , _a ) -> Dict[str, List[str]]:
lowerCAmelCase_ = {
k: self.tokenizer.batch_decode(v.tolist() ) if "mask" not in k else v.shape for k, v in batch.items()
}
save_json(_a , Path(self.output_dir ) / "text_batch.json" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / "tok_batch.json" )
lowerCAmelCase_ = True
return readable_batch
def __a ( self , _a , **_a ) -> Union[str, Any]:
return self.model(_a , **_a )
def __a ( self , _a ) -> Union[str, Any]:
lowerCAmelCase_ = self.tokenizer.batch_decode(
_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a )
return lmap(str.strip , _a )
def __a ( self , _a ) -> Tuple:
lowerCAmelCase_ = self.tokenizer.pad_token_id
lowerCAmelCase_ , lowerCAmelCase_ = batch["input_ids"], batch["attention_mask"]
lowerCAmelCase_ = batch["labels"]
if isinstance(self.model , _a ):
lowerCAmelCase_ = self.model._shift_right(_a )
else:
lowerCAmelCase_ = shift_tokens_right(_a , _a )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
lowerCAmelCase_ = decoder_input_ids
self.save_readable_batch(_a )
lowerCAmelCase_ = self(_a , attention_mask=_a , decoder_input_ids=_a , use_cache=_a )
lowerCAmelCase_ = outputs["logits"]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
lowerCAmelCase_ = nn.CrossEntropyLoss(ignore_index=_a )
assert lm_logits.shape[-1] == self.vocab_size
lowerCAmelCase_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
lowerCAmelCase_ = nn.functional.log_softmax(_a , dim=-1 )
lowerCAmelCase_ , lowerCAmelCase_ = label_smoothed_nll_loss(
_a , _a , self.hparams.label_smoothing , ignore_index=_a )
return (loss,)
@property
def __a ( self ) -> int:
return self.tokenizer.pad_token_id
def __a ( self , _a , _a ) -> Dict:
lowerCAmelCase_ = self._step(_a )
lowerCAmelCase_ = dict(zip(self.loss_names , _a ) )
# tokens per batch
lowerCAmelCase_ = batch["input_ids"].ne(self.pad ).sum() + batch["labels"].ne(self.pad ).sum()
lowerCAmelCase_ = batch["input_ids"].shape[0]
lowerCAmelCase_ = batch["input_ids"].eq(self.pad ).sum()
lowerCAmelCase_ = batch["input_ids"].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def __a ( self , _a , _a ) -> Dict:
return self._generative_step(_a )
def __a ( self , _a , _a="val" ) -> Dict:
self.step_count += 1
lowerCAmelCase_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
lowerCAmelCase_ = losses["loss"]
lowerCAmelCase_ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"]
}
lowerCAmelCase_ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
lowerCAmelCase_ = torch.tensor(_a ).type_as(_a )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_a )
lowerCAmelCase_ = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
lowerCAmelCase_ = self.step_count
self.metrics[prefix].append(_a ) # callback writes this to self.metrics_save_path
lowerCAmelCase_ = flatten_list([x["preds"] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f"{prefix}_loss": loss,
f"{prefix}_{self.val_metric}": metric_tensor,
}
def __a ( self , _a , _a ) -> Dict:
return calculate_rouge(_a , _a )
def __a ( self , _a ) -> dict:
lowerCAmelCase_ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
lowerCAmelCase_ = self.model.generate(
batch["input_ids"] , attention_mask=batch["attention_mask"] , use_cache=_a , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
lowerCAmelCase_ = (time.time() - ta) / batch["input_ids"].shape[0]
lowerCAmelCase_ = self.ids_to_clean_text(_a )
lowerCAmelCase_ = self.ids_to_clean_text(batch["labels"] )
lowerCAmelCase_ = self._step(_a )
lowerCAmelCase_ = dict(zip(self.loss_names , _a ) )
lowerCAmelCase_ = self.calc_generative_metrics(_a , _a )
lowerCAmelCase_ = np.mean(lmap(_a , _a ) )
base_metrics.update(gen_time=_a , gen_len=_a , preds=_a , target=_a , **_a )
return base_metrics
def __a ( self , _a , _a ) -> List[str]:
return self._generative_step(_a )
def __a ( self , _a ) -> List[Any]:
return self.validation_epoch_end(_a , prefix="test" )
def __a ( self , _a ) -> SeqaSeqDataset:
lowerCAmelCase_ = self.n_obs[type_path]
lowerCAmelCase_ = self.target_lens[type_path]
lowerCAmelCase_ = self.dataset_class(
self.tokenizer , type_path=_a , n_obs=_a , max_target_length=_a , **self.dataset_kwargs , )
return dataset
def __a ( self , _a , _a , _a = False ) -> DataLoader:
lowerCAmelCase_ = self.get_dataset(_a )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
lowerCAmelCase_ = dataset.make_sortish_sampler(_a , distributed=self.hparams.gpus > 1 )
return DataLoader(
_a , batch_size=_a , collate_fn=dataset.collate_fn , shuffle=_a , num_workers=self.num_workers , sampler=_a , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
lowerCAmelCase_ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
_a , batch_sampler=_a , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
_a , batch_size=_a , collate_fn=dataset.collate_fn , shuffle=_a , num_workers=self.num_workers , sampler=_a , )
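        # Dataloader strategy above: the sortish sampler groups examples of similar length
        # to reduce padding, the dynamic batch sampler caps tokens-per-batch instead of
        # examples-per-batch, and the final branch falls back to plain fixed-size batches.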
def __a ( self ) -> DataLoader:
lowerCAmelCase_ = self.get_dataloader("train" , batch_size=self.hparams.train_batch_size , shuffle=_a )
return dataloader
def __a ( self ) -> DataLoader:
return self.get_dataloader("val" , batch_size=self.hparams.eval_batch_size )
def __a ( self ) -> DataLoader:
return self.get_dataloader("test" , batch_size=self.hparams.eval_batch_size )
@staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total output sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total output sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total output sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--task", type=str, default="summarization", required=False, help="Task name: summarization or translation.")
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will affect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]

    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
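# Illustrative invocation sketch (added; not part of the original script). The
# generic flags (--model_name_or_path, --data_dir, --output_dir, --do_predict)
# come from add_generic_args/BaseTransformer and are assumptions here:
#
#   python finetune.py \
#       --model_name_or_path t5-small \
#       --data_dir ./wmt_en_ro \
#       --output_dir ./outputs \
#       --task translation \
#       --n_val 500 \
#       --do_predict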
| 713 |
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
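
# Illustrative check (added example, not called by the script): a blinker
# oscillates with period 2, so two applications of new_generation restore it.
def _demo_blinker() -> None:
    first = new_generation(BLINKER)   # vertical bar becomes a horizontal bar
    second = new_generation(first)    # and back again
    assert second == BLINKER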
| 226 | 0 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str) -> bool:
    """
    Compares a library version to some requirement using a given operation
    (one of the keys of STR_OPERATION_TO_FUNC).
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation_func = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation_func(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str) -> bool:
    """
    Compares the currently installed torch version to a given reference version.
    """
    return compare_versions(torch_version, operation, version)
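
# Usage sketch (added example). It assumes STR_OPERATION_TO_FUNC maps operator
# strings such as ">=" and "<" to functions from the `operator` module:
def _demo_version_checks() -> None:
    if is_torch_version(">=", "1.12.0"):
        print("torch is at least 1.12.0")
    if compare_versions("numpy", "<", "2.0.0"):
        print("installed numpy is older than 2.0.0")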
| 460 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float32)
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
lowercase__ = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
lowercase__ = 8
lowercase__ = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(A )
elif key_name.startswith('''model/moe''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(A )
elif key_name.endswith('''/softmlp/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(A )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
lowercase__ = key_name[-9:-7]
for i in range(16 ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowercase__ = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase__ = torch.tensor(A )
elif key_name.startswith('''model/mlp''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(A )
elif key_name.endswith('''/p1/bias''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(A )
elif key_name.endswith('''/p2/kernel''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(A )
elif key_name.endswith('''/p2/bias''' ):
lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(A )
elif key_name.startswith('''model/ln''' ):
lowercase__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase__ = '''model.blocks.%d.feed_forward.norm.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(A )
elif key_name.endswith('''/g''' ):
lowercase__ = '''model.blocks.%d.feed_forward.norm.weight''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(A )
elif key_name.startswith('''model/att''' ):
lowercase__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
lowercase__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase__ = state[:, 0, :, :]
lowercase__ = state[:, 1, :, :]
lowercase__ = state[:, 2, :, :]
lowercase__ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowercase__ = torch.tensor(A )
lowercase__ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowercase__ = torch.tensor(A )
lowercase__ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowercase__ = torch.tensor(A )
elif key_name.endswith('''/o/kernel''' ):
lowercase__ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowercase__ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(A )
elif key_name.startswith('''model/an''' ):
lowercase__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowercase__ = '''model.blocks.%d.self_attn.norm.bias''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(A )
elif key_name.endswith('''/g''' ):
lowercase__ = '''model.blocks.%d.self_attn.norm.weight''' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(A )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
lowercase__ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowercase__ = '''model.%s.weight''' % nlayer
lowercase__ = vnp.copy() # same in embedded
lowercase__ = torch.tensor(A )
if key_name.startswith('''model/wte''' ):
lowercase__ = '''lm_head.weight'''
lowercase__ = vnp.copy() # same in embedded
lowercase__ = torch.tensor(A )
elif key_name.startswith('''model/wob''' ):
lowercase__ = '''final_logits_bias'''
lowercase__ = vnp.copy() # same in embedded
lowercase__ = state.reshape((1, -1) )
lowercase__ = torch.tensor(A )
elif key_name == "model/dense/kernel":
lowercase__ = '''model.last_project.weight'''
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(A )
elif key_name == "model/dense_1/bias":
lowercase__ = '''model.last_project.bias'''
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(A )
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
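
# Example invocation sketch (added; the script name and paths are placeholders):
#
#   python convert_gptsan_tf_checkpoint_to_pytorch.py \
#       --tf_model_dir ./gptsan-tf-checkpoint \
#       --output ./gptsan_pytorch_model.pt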
| 460 | 1 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """
    Returns the sum of all natural numbers below n that are multiples of 3 or 5
    (Project Euler problem 1).
    """
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(f'{solution() = }')
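
# Cross-check (added example): for n = 10 the multiples of 3 or 5 below 10 are
# 3, 5, 6 and 9, whose sum is 23.
def _demo_solution() -> None:
    assert solution(10) == 23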
| 491 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )
    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """
        Checks whether there might be something wrong with given input with regard to the model.
        """
        return True
    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result
    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
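
# Usage sketch (added; assumes the standard `transformers.pipeline` factory and
# that the named checkpoint can be downloaded):
#
#   from transformers import pipeline
#
#   summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
#   print(summarizer("A very long article ...", max_length=56)[0]["summary_text"])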
| 491 | 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')

    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding'))
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding'))
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight'))
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias'))
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight'))
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias'))
for i in range(config.vision_config.num_hidden_layers):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight'''))
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias'''))
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight'''))
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias'''))
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight'''))
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',))
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias'''))
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight'''))
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias'''))
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight'''))
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias'''))
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight'))
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias'))
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''')
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''')

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def get_blipa_config(model_name, eos_token_id):
    image_size = 364 if 'coco' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b', eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b', eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()

    config = BlipaConfig(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights to the Transformers design.
    """
    tokenizer = (
        AutoTokenizer.from_pretrained('facebook/opt-2.7b')
        if 'opt' in model_name
        else AutoTokenizer.from_pretrained('google/flan-t5-xl')
    )
    eos_token_id = tokenizer('\n', add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)

    hf_model = BlipaForConditionalGeneration(config).eval()
    model_name_to_original = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
    name, type = model_name_to_original[model_name]

    # load original model
    print('Loading original model...')
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print('Done!')

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('Qformer.bert'):
            key = key.replace('Qformer.bert', 'qformer')
        if "attention.self" in key:
            key = key.replace('self', 'attention')
        if "opt_proj" in key:
            key = key.replace('opt_proj', 'language_projection')
        if "t5_proj" in key:
            key = key.replace('t5_proj', 'language_projection')
        if key.startswith('opt'):
            key = key.replace('opt', 'language')
        if key.startswith('t5'):
            key = key.replace('t5', 'language')
        state_dict[key] = val
# read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors['eval'](image).unsqueeze(0).to(device)
    input_ids = tokenizer(['\n'], return_tensors='pt').input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors='pt').pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)
    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': ['']}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print('First values of original logits:', original_logits[0, :3, :3])
    print('First values of HF logits:', logits[0, :3, :3])
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print('Looks ok!')
    print('Generating a caption...')
    prompt = ''
    input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to(device)

    original_outputs = original_model.generate({'image': original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print('Original generation:', original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print('HF generation:', output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f'''nielsr/{model_name}''')
        hf_model.push_to_hub(f'''nielsr/{model_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
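
# Example invocation sketch (added; the script name is a placeholder and LAVIS
# must be installed as noted at the top of the file):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b-converted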
| 73 |
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """
    Reads a given file as bytes and returns them as one long string of bits.
    """
    result = ''
    try:
        with open(file_path, 'rb') as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f'''{dat:08b}'''
            result += curr_byte
        return result
    except OSError:
        print('File not accessible')
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """
    Adds a new string (curr_string + "0" and curr_string + "1") to the lexicon.
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + '0'] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = '0' + lexicon[curr_key]

    lexicon[curr_string + '1'] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """
    Compresses the given string of data bits using the Lempel-Ziv algorithm.
    """
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ''

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """
    Adds the original file's length (in bits, binary-encoded) to the front of
    the compressed string.
    """
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """
    Writes the given string of bits (padded to whole bytes) into a file.
    """
    byte_length = 8
    try:
        with open(file_path, 'wb') as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('10000000')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder='big'))
    except OSError:
        print('File not accessible')
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
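
# Bit-level sketch (added example, not called by the script): compress_data
# consumes a string of bits and emits Lempel-Ziv codes; the exact output
# depends on the evolving lexicon.
def _demo_compress_data() -> None:
    print(compress_data("0010110100"))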
| 73 | 1 |
from math import ceil
def solution(n: int = 1001) -> int:
    """
    Returns the sum of the numbers on the diagonals of an n by n number spiral
    (Project Euler problem 28).
    """
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
            print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
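
# Worked example (added): for a 5 x 5 spiral the diagonals hold
# 1, 3, 5, 7, 9, 13, 17, 21 and 25, which sum to 101.
def _demo_solution() -> None:
    assert solution(5) == 101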
| 214 |
import argparse
CUSTOM_JS_FILE = 'docs/source/_static/js/custom.js'


def update_custom_js(version: str):
    """Updates the stable version and the version table in the custom.js file."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', help='Release version.')
    args = parser.parse_args()
update_custom_js(args.version)
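
# Example invocation sketch (added; run from the repository root so the
# relative CUSTOM_JS_FILE path resolves, the script name is a placeholder):
#
#   python update_custom_js.py --version 4.28.0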
| 214 | 1 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar('T')


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """
    Linked-list-based stack: push/pop/peek operate on the head node.
    """

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError('pop from empty stack')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError('peek from empty stack')
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
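
# Usage sketch (added example, not executed by the doctest run above).
def _demo_stack() -> None:
    stack = LinkedStack[int]()
    stack.push(1)
    stack.push(2)
    stack.push(3)
    assert str(stack) == "3->2->1"
    assert stack.pop() == 3
    assert stack.peek() == 2
    assert len(stack) == 2
    stack.clear()
    assert stack.is_empty()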
| 592 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json',
    'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json',
}
class XLNetConfig(PretrainedConfig):
    model_type = 'xlnet'
    keys_to_ignore_at_inference = ['mems']
    attribute_map = {
        'n_token': 'vocab_size',  # Backward compatibility
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
def __init__(self : str , a__ : Any=3_2000 , a__ : List[str]=1024 , a__ : List[str]=24 , a__ : Dict=16 , a__ : Optional[int]=4096 , a__ : Optional[int]="gelu" , a__ : Optional[Any]=True , a__ : int="bi" , a__ : Optional[Any]=0.0_2 , a__ : str=1E-12 , a__ : Dict=0.1 , a__ : List[str]=512 , a__ : Optional[int]=None , a__ : Optional[int]=True , a__ : Any=False , a__ : List[Any]=False , a__ : List[Any]=-1 , a__ : Optional[Any]=False , a__ : List[Any]="last" , a__ : Dict=True , a__ : Union[str, Any]="tanh" , a__ : Optional[int]=0.1 , a__ : Tuple=5 , a__ : str=5 , a__ : int=5 , a__ : Any=1 , a__ : List[Any]=2 , **a__ : Union[str, Any] , ):
"""simple docstring"""
__snake_case = vocab_size
__snake_case = d_model
__snake_case = n_layer
__snake_case = n_head
if d_model % n_head != 0:
raise ValueError(f"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""" )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f"""`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})""" )
__snake_case = d_model // n_head
__snake_case = ff_activation
__snake_case = d_inner
__snake_case = untie_r
__snake_case = attn_type
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = dropout
__snake_case = mem_len
__snake_case = reuse_len
__snake_case = bi_data
__snake_case = clamp_len
__snake_case = same_length
__snake_case = summary_type
__snake_case = summary_use_proj
__snake_case = summary_activation
__snake_case = summary_last_dropout
__snake_case = start_n_top
__snake_case = end_n_top
__snake_case = bos_token_id
__snake_case = pad_token_id
__snake_case = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'''
''' instead.''' , a__ , )
__snake_case = kwargs['''use_cache''']
__snake_case = use_mems_eval
__snake_case = use_mems_train
super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ )
@property
def a (self : Any ):
"""simple docstring"""
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def a (self : Dict , a__ : Union[str, Any] ):
"""simple docstring"""
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
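
# Minimal usage sketch (added example; the sizes are illustrative, not the
# pretrained defaults):
def _demo_config() -> None:
    config = XLNetConfig(vocab_size=1000, d_model=64, n_layer=2, n_head=4)
    assert config.d_head == 16  # derived as d_model // n_head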
| 592 | 1 |
"""simple docstring"""
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    """Heap-backed priority queue that also tracks membership in a set."""

    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(P: TPos, goal: TPos):
    # euclidean distance
    a = np.array(P)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(P: TPos, goal: TPos):
    return consistent_heuristic(P, goal) // t


def heuristic_2(P: TPos, goal: TPos):
    # manhattan distance
    return abs(P[0] - goal[0]) + abs(P[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + Wa * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= Wa * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
Wa = 1  # heuristic weight (the original algorithm's W1 and W2, both equal to 1 here)
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
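
# Quick sanity sketch (added example, not called by the script): with t == 1,
# the three heuristics at the start cell reduce to plain distances to goal.
def _demo_heuristics() -> None:
    print(consistent_heuristic(start, goal))  # euclidean, about 26.87 for (0, 0) -> (19, 19)
    print(heuristic_1(start, goal))           # euclidean floor-divided by t
    print(heuristic_2(start, goal))           # manhattan, 38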
| 715 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """
    Returns the set of symbol pairs in a word, where a word is represented as a
    tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """
    Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ) -> None:
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase__):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
snake_case__ : int = os.path.join(
lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
snake_case__ : Tuple = os.path.join(
lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(lowerCamelCase__ , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase__ , ensure_ascii=lowerCamelCase__) + "\n")
snake_case__ : str = 0
with open(lowerCamelCase__ , "w" , encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!")
snake_case__ : Tuple = token_index
writer.write(" ".join(lowerCamelCase__) + "\n")
index += 1
return vocab_file, merge_file
| 150 | 0 |
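# A hedged, self-contained sketch of the greedy BPE merge loop the tokenizer's
# `bpe` method above implements: repeatedly merge the adjacent symbol pair with
# the lowest merge rank until no known pair remains. The `ranks` table and the
# `_sketch_` names below are toy illustrations, not the real merges file.
def _sketch_get_pairs(word):
    # all adjacent symbol pairs in the current segmentation
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}
def _sketch_bpe(token, ranks):
    word = tuple(token[:-1]) + (token[-1] + "</w>",)
    pairs = _sketch_get_pairs(word)
    while pairs:
        bigram = min(pairs, key=lambda pair: ranks.get(pair, float("inf")))
        if bigram not in ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                new_word.append(first + second)  # merge the ranked pair
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
        if len(word) == 1:
            break
        pairs = _sketch_get_pairs(word)
    return " ".join(word)
print(_sketch_bpe("low", {("l", "o"): 0, ("lo", "w</w>"): 1}))  # -> low</w>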
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = 42
snake_case_ = 42
def __init__( self : List[Any] ,A : UNetaDModel ,A : ScoreSdeVeScheduler ):
super().__init__()
self.register_modules(unet=A ,scheduler=A )
@torch.no_grad()
def __call__( self : int ,A : int = 1 ,A : int = 20_00 ,A : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,A : Optional[str] = "pil" ,A : bool = True ,**A : Optional[int] ,):
__A = self.unet.config.sample_size
__A = (batch_size, 3, img_size, img_size)
__A = self.unet
__A = randn_tensor(A ,generator=A ) * self.scheduler.init_noise_sigma
__A = sample.to(self.device )
self.scheduler.set_timesteps(A )
self.scheduler.set_sigmas(A )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__A = self.scheduler.sigmas[i] * torch.ones(shape[0] ,device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__A = self.unet(A ,A ).sample
__A = self.scheduler.step_correct(A ,A ,generator=A ).prev_sample
# prediction step
__A = model(A ,A ).sample
__A = self.scheduler.step_pred(A ,A ,A ,generator=A )
__A , __A = output.prev_sample, output.prev_sample_mean
__A = sample_mean.clamp(0 ,1 )
__A = sample.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
__A = self.numpy_to_pil(A )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=A )
| 55 |
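# A hedged, self-contained sketch of the predictor-corrector loop structure the
# ScoreSdeVe pipeline above follows: at each noise level, run a few Langevin
# "corrector" steps, then one reverse-SDE "predictor" step. The score function
# is an illustrative stand-in (the score of a Gaussian), not a trained U-Net,
# and the step sizes follow the usual SNR heuristic.
import numpy as np
rng = np.random.default_rng(0)
def _toy_score(x, sigma):
    # stand-in score model: grad log p(x) for x ~ N(0, 1 + sigma^2)
    return -x / (1.0 + sigma**2)
def _pc_sample(num_steps=50, correct_steps=2, snr=0.16, sigma_max=10.0, sigma_min=0.1):
    sigmas = np.geomspace(sigma_max, sigma_min, num_steps)
    x = rng.standard_normal(4) * sigma_max  # analogue of init_noise_sigma scaling
    for i, sigma in enumerate(sigmas):
        # corrector: Langevin dynamics at a fixed noise level
        for _ in range(correct_steps):
            g = _toy_score(x, sigma)
            step = 2 * (snr / (np.linalg.norm(g) + 1e-8)) ** 2
            x = x + step * g + np.sqrt(2 * step) * rng.standard_normal(x.shape)
        # predictor: one reverse-diffusion (Euler-Maruyama) step
        sigma_next = sigmas[i + 1] if i + 1 < num_steps else 0.0
        diff = sigma**2 - sigma_next**2
        x = x + diff * _toy_score(x, sigma) + np.sqrt(diff) * rng.standard_normal(x.shape)
    return x
print(_pc_sample())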
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a :str = 16
a :Union[str, Any] = 32
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase = 16 ) -> Tuple:
SCREAMING_SNAKE_CASE__ : int = AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE__ : List[str] = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__ : Any = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE__ : int = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE__ : str = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE__ : Dict = 8
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
return tokenizer.pad(
__lowerCAmelCase , padding="""longest""" , max_length=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a :Dict = mocked_dataloaders # noqa: F811
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __lowerCAmelCase ) == "1":
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
# New Code #
SCREAMING_SNAKE_CASE__ : Optional[int] = int(args.gradient_accumulation_steps )
# Initialize accelerator
SCREAMING_SNAKE_CASE__ : Optional[Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__lowerCAmelCase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"""Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__ : Any = config["""lr"""]
SCREAMING_SNAKE_CASE__ : str = int(config["""num_epochs"""] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(config["""seed"""] )
SCREAMING_SNAKE_CASE__ : List[str] = int(config["""batch_size"""] )
SCREAMING_SNAKE_CASE__ : Any = evaluate.load("""glue""" , """mrpc""" )
set_seed(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE__ : int = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__lowerCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE__ : int = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AdamW(params=model.parameters() , lr=__lowerCAmelCase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE__ : Any = get_linear_schedule_with_warmup(
optimizer=__lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(__lowerCAmelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Now we train the model
for epoch in range(__lowerCAmelCase ):
model.train()
for step, batch in enumerate(__lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# Gradient accumulation is currently unsupported (and not advised) on TPUs, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : str = model(**__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = output.loss
accelerator.backward(__lowerCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any = model(**__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__lowerCAmelCase , references=__lowerCAmelCase , )
SCREAMING_SNAKE_CASE__ : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __lowerCAmelCase )
def _lowercase ( ) -> Any:
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__lowerCAmelCase , default=__lowerCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__lowerCAmelCase , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : int = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
| 680 | 0 |
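# A hedged sketch of what `accelerator.accumulate(model)` above does in effect:
# gradients from several micro-batches are summed before a single optimizer
# step, so the effective batch size is batch_size * accumulation_steps. Plain
# PyTorch version for illustration; the model and data are toy stand-ins.
import torch
from torch import nn
model = nn.Linear(8, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
accumulation_steps = 4
for step in range(16):
    x, y = torch.randn(2, 8), torch.randn(2, 1)
    loss = nn.functional.mse_loss(model(x), y)
    # scale so the accumulated gradient matches the average over micro-batches
    (loss / accumulation_steps).backward()
    if (step + 1) % accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()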
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__lowerCAmelCase = logging.get_logger(__name__)
class __a ( __UpperCamelCase ):
__lowercase : Union[str, Any] = 'upernet'
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=512 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=[1, 2, 3, 6] , lowerCAmelCase__=True , lowerCAmelCase__=0.4 , lowerCAmelCase__=384 , lowerCAmelCase__=256 , lowerCAmelCase__=1 , lowerCAmelCase__=False , lowerCAmelCase__=255 , **lowerCAmelCase__ , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowercase__: str = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase__: str = backbone_config.get('model_type' )
lowercase__: Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
lowercase__: Dict = config_class.from_dict(lowerCAmelCase__ )
lowercase__: List[Any] = backbone_config
lowercase__: Union[str, Any] = hidden_size
lowercase__: Tuple = initializer_range
lowercase__: Optional[int] = pool_scales
lowercase__: Union[str, Any] = use_auxiliary_head
lowercase__: Any = auxiliary_loss_weight
lowercase__: Tuple = auxiliary_in_channels
lowercase__: Optional[Any] = auxiliary_channels
lowercase__: List[Any] = auxiliary_num_convs
lowercase__: List[str] = auxiliary_concat_input
lowercase__: Any = loss_ignore_index
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: Tuple = copy.deepcopy(self.__dict__ )
lowercase__: List[Any] = self.backbone_config.to_dict()
lowercase__: str = self.__class__.model_type
return output
| 335 |
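# A hedged sketch of the composed-config pattern UperNetConfig uses above: the
# top-level config embeds a backbone config object, and `to_dict` recursively
# serializes it so the whole thing round-trips through JSON. Class names here
# are illustrative assumptions, not the transformers API.
import copy, json
class ToyBackboneConfig:
    model_type = "toy-resnet"
    def __init__(self, depths=(2, 2)):
        self.depths = list(depths)
    def to_dict(self):
        return {"model_type": self.model_type, "depths": self.depths}
class ToyHeadConfig:
    model_type = "toy-upernet"
    def __init__(self, backbone_config=None, hidden_size=512):
        self.backbone_config = backbone_config or ToyBackboneConfig()
        self.hidden_size = hidden_size
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()  # nested serialization
        output["model_type"] = self.model_type
        return output
print(json.dumps(ToyHeadConfig().to_dict(), indent=2))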
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class __a :
__lowercase : float
__lowercase : TreeNode | None = None
__lowercase : TreeNode | None = None
def snake_case_ ( snake_case ) -> bool:
# Validation
def is_valid_tree(snake_case ) -> bool:
if node is None:
return True
if not isinstance(snake_case , snake_case ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(snake_case ):
raise ValueError(
'Each node should be type of TreeNode and data should be float.' )
def is_binary_search_tree_recursive_check(
snake_case , snake_case , snake_case ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , snake_case , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , snake_case )
)
return is_binary_search_tree_recursive_check(snake_case , -float('inf' ) , float('inf' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 335 | 1 |
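# A hedged usage sketch of the bounds-recursion idea above, with clean names
# for readability: every node must lie strictly between the bounds inherited
# from its ancestors. The mangled function above implements the same check.
from dataclasses import dataclass
@dataclass
class ToyNode:
    data: float
    left: "ToyNode | None" = None
    right: "ToyNode | None" = None
def _is_bst(node, low=float("-inf"), high=float("inf")):
    if node is None:
        return True
    return (
        low < node.data < high
        and _is_bst(node.left, low, node.data)
        and _is_bst(node.right, node.data, high)
    )
print(_is_bst(ToyNode(2.0, ToyNode(1.0), ToyNode(3.0))))  # True
print(_is_bst(ToyNode(2.0, ToyNode(3.0), ToyNode(1.0))))  # False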
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __snake_case :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_0_0 , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=3_0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1_0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , ):
snake_case__ : Tuple = parent
snake_case__ : Optional[Any] = 1_0_0
snake_case__ : List[Any] = batch_size
snake_case__ : List[str] = image_size
snake_case__ : int = patch_size
snake_case__ : Tuple = num_channels
snake_case__ : List[str] = is_training
snake_case__ : List[str] = use_labels
snake_case__ : int = hidden_size
snake_case__ : List[str] = num_hidden_layers
snake_case__ : Any = num_attention_heads
snake_case__ : Union[str, Any] = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : Optional[int] = attention_probs_dropout_prob
snake_case__ : Optional[Any] = type_sequence_label_size
snake_case__ : Optional[Any] = initializer_range
snake_case__ : Union[str, Any] = scope
snake_case__ : Any = out_indices
snake_case__ : str = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case__ : Optional[Any] = (image_size // patch_size) ** 2
snake_case__ : str = num_patches + 1
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : int = None
snake_case__ : List[Any] = None
if self.use_labels:
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case__ : Dict = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCamelCase ( self ):
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = BeitModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[int] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : int = BeitForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : List[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Union[str, Any] = self.type_sequence_label_size
snake_case__ : Any = BeitForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Any = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case__ : Any = 1
snake_case__ : Any = BeitForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ : List[str] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = self.num_labels
snake_case__ : str = BeitForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : int = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
snake_case__ : Any = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] = config_and_inputs
snake_case__ : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __UpperCamelCase ( self ):
snake_case__ : List[str] = BeitModelTester(self )
snake_case__ : List[Any] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Tuple = model_class(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : int = [*signature.parameters.keys()]
snake_case__ : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
if not self.model_tester.is_training:
return
snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__SCREAMING_SNAKE_CASE ), BeitForMaskedImageModeling]:
continue
snake_case__ : List[str] = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
snake_case__ : List[Any] = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
snake_case__ : Optional[int] = False
snake_case__ : Any = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__SCREAMING_SNAKE_CASE ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
snake_case__ : str = model_class(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.to(__SCREAMING_SNAKE_CASE )
model.train()
snake_case__ : str = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def __UpperCamelCase ( self ):
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : int = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
snake_case__ : Dict = model_class(config=__SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
def __UpperCamelCase ( self ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : List[str] = BeitModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ) -> str:
'''simple docstring'''
snake_case__ : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self ):
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = self.default_image_processor
snake_case__ : Optional[int] = prepare_img()
snake_case__ : Dict = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values.to(__SCREAMING_SNAKE_CASE )
# prepare bool_masked_pos
snake_case__ : str = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : Optional[Any] = model(pixel_values=__SCREAMING_SNAKE_CASE , bool_masked_pos=__SCREAMING_SNAKE_CASE )
snake_case__ : int = outputs.logits
# verify the logits
snake_case__ : Dict = torch.Size((1, 1_9_6, 8_1_9_2) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-2 ) )
@slow
def __UpperCamelCase ( self ):
snake_case__ : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = self.default_image_processor
snake_case__ : Tuple = prepare_img()
snake_case__ : int = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : str = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = outputs.logits
# verify the logits
snake_case__ : List[str] = torch.Size((1, 1_0_0_0) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
snake_case__ : List[str] = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() , __SCREAMING_SNAKE_CASE )
@slow
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = self.default_image_processor
snake_case__ : int = prepare_img()
snake_case__ : Any = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : Optional[Any] = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = outputs.logits
# verify the logits
snake_case__ : str = torch.Size((1, 2_1_8_4_1) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
snake_case__ : Union[str, Any] = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item() , __SCREAMING_SNAKE_CASE )
@slow
def __UpperCamelCase ( self ):
snake_case__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
snake_case__ : List[Any] = model.to(__SCREAMING_SNAKE_CASE )
snake_case__ : Any = BeitImageProcessor(do_resize=__SCREAMING_SNAKE_CASE , size=6_4_0 , do_center_crop=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
snake_case__ : int = Image.open(ds[0]["""file"""] )
snake_case__ : Tuple = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : List[str] = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = outputs.logits
# verify the logits
snake_case__ : str = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
if is_pillow_less_than_a:
snake_case__ : List[str] = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=__SCREAMING_SNAKE_CASE , )
else:
snake_case__ : Tuple = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=__SCREAMING_SNAKE_CASE , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@slow
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
snake_case__ : str = model.to(__SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = BeitImageProcessor(do_resize=__SCREAMING_SNAKE_CASE , size=6_4_0 , do_center_crop=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
snake_case__ : Optional[int] = Image.open(ds[0]["""file"""] )
snake_case__ : List[str] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
snake_case__ : int = model(**__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = outputs.logits.detach().cpu()
snake_case__ : int = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE , target_sizes=[(5_0_0, 3_0_0)] )
snake_case__ : Tuple = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] = torch.Size((1_6_0, 1_6_0) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
| 38 |
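# A hedged worked example of the sequence-length comment in the tester above:
# a ViT-style model sees (image_size // patch_size)**2 patches plus one [CLS]
# token, using the 30x30 image / 2x2 patch values from the test config.
image_size, patch_size = 30, 2
seq_length = (image_size // patch_size) ** 2 + 1
print(seq_length)  # -> 226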
'''simple docstring'''
def UpperCamelCase__ ( __magic_name__ : List[Any] ) -> Tuple:
'''simple docstring'''
if not head:
return True
# split the list to two parts
snake_case__ , snake_case__ : Dict = head.next, head
while fast and fast.next:
snake_case__ : Any = fast.next.next
snake_case__ : int = slow.next
snake_case__ : Dict = slow.next
snake_case__ : List[str] = None # cut the list in half (the check still works without this, but it keeps the halves separate)
# reverse the second part
snake_case__ : Tuple = None
while second:
snake_case__ : Tuple = second.next
snake_case__ : Any = node
snake_case__ : str = second
snake_case__ : Optional[Any] = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
snake_case__ : List[Any] = node.next
snake_case__ : int = head.next
return True
def UpperCamelCase__ ( __magic_name__ : Any ) -> Optional[Any]:
'''simple docstring'''
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
snake_case__ , snake_case__ , snake_case__ : List[Any] = head, head, head # slow, fast and cur all start at head
while fast and fast.next:
snake_case__ , snake_case__ : Any = fast.next.next, slow.next
# 2. Push the second half into the stack
snake_case__ : Tuple = [slow.val]
while slow.next:
snake_case__ : Optional[Any] = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
snake_case__ : str = cur.next
return True
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
if not head or not head.next:
return True
snake_case__ : int = {}
snake_case__ : Union[str, Any] = 0
while head:
if head.val in d:
d[head.val].append(__magic_name__ )
else:
snake_case__ : Tuple = [pos]
snake_case__ : Optional[Any] = head.next
pos += 1
snake_case__ : int = pos - 1
snake_case__ : str = 0
for v in d.values():
if len(__magic_name__ ) % 2 != 0:
middle += 1
else:
snake_case__ : List[str] = 0
for i in range(0 , len(__magic_name__ ) ):
if v[i] + v[len(__magic_name__ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| 38 | 1 |
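# A hedged sketch of the fast/slow pointer midpoint trick the palindrome
# checks above rely on: the fast pointer advances two nodes per step, so when
# it runs off the end the slow pointer sits at the middle of the list.
class _ToyListNode:
    def __init__(self, val, nxt=None):
        self.val, self.next = val, nxt
def _middle(head):
    fast = slow = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    return slow
head = _ToyListNode(1, _ToyListNode(2, _ToyListNode(3, _ToyListNode(2, _ToyListNode(1)))))
print(_middle(head).val)  # -> 3, the middle of 1-2-3-2-1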
"""simple docstring"""
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class lowerCAmelCase__ ( unittest.TestCase, lowercase ):
'''simple docstring'''
def A_ ( self ):
_lowerCamelCase : str = load_tool('text-to-speech' )
self.tool.setup()
def A_ ( self ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
_lowerCamelCase : str = self.tool('hey' )
_lowerCamelCase : Optional[Any] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
def A_ ( self ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
_lowerCamelCase : List[Any] = self.tool(text='hey' )
_lowerCamelCase : Any = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) ) | 703 |
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowerCAmelCase__ :
'''simple docstring'''
@staticmethod
def A_ ( *lowercase , **lowercase ):
pass
@is_pipeline_test
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def A_ ( self ):
_lowerCamelCase : int = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
_lowerCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCamelCase : List[str] = image_classifier(lowercase , candidate_labels=['a', 'b', 'c'] )
# The scores are so close that floating-point rounding makes the ordering unstable across
# Python and torch versions, so the test accepts either ordering.
self.assertIn(
nested_simplify(lowercase ) , [
[{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}],
[{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'c'}, {'score': 0.3_33, 'label': 'b'}],
] , )
_lowerCamelCase : Optional[Any] = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(lowercase ) , [
[
{'score': 0.3_33, 'label': ANY(lowercase )},
{'score': 0.3_33, 'label': ANY(lowercase )},
{'score': 0.3_33, 'label': ANY(lowercase )},
],
[
{'score': 0.3_33, 'label': ANY(lowercase )},
{'score': 0.3_33, 'label': ANY(lowercase )},
{'score': 0.3_33, 'label': ANY(lowercase )},
],
[
{'score': 0.3_33, 'label': ANY(lowercase )},
{'score': 0.3_33, 'label': ANY(lowercase )},
{'score': 0.3_33, 'label': ANY(lowercase )},
],
[
{'score': 0.3_33, 'label': ANY(lowercase )},
{'score': 0.3_33, 'label': ANY(lowercase )},
{'score': 0.3_33, 'label': ANY(lowercase )},
],
[
{'score': 0.3_33, 'label': ANY(lowercase )},
{'score': 0.3_33, 'label': ANY(lowercase )},
{'score': 0.3_33, 'label': ANY(lowercase )},
],
] , )
@require_tf
def A_ ( self ):
_lowerCamelCase : Optional[Any] = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf' )
_lowerCamelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCamelCase : Optional[int] = image_classifier(lowercase , candidate_labels=['a', 'b', 'c'] )
self.assertEqual(
nested_simplify(lowercase ) , [{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}] , )
_lowerCamelCase : Optional[Any] = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(lowercase ) , [
[
{'score': 0.3_33, 'label': ANY(lowercase )},
{'score': 0.3_33, 'label': ANY(lowercase )},
{'score': 0.3_33, 'label': ANY(lowercase )},
],
[
{'score': 0.3_33, 'label': ANY(lowercase )},
{'score': 0.3_33, 'label': ANY(lowercase )},
{'score': 0.3_33, 'label': ANY(lowercase )},
],
[
{'score': 0.3_33, 'label': ANY(lowercase )},
{'score': 0.3_33, 'label': ANY(lowercase )},
{'score': 0.3_33, 'label': ANY(lowercase )},
],
[
{'score': 0.3_33, 'label': ANY(lowercase )},
{'score': 0.3_33, 'label': ANY(lowercase )},
{'score': 0.3_33, 'label': ANY(lowercase )},
],
[
{'score': 0.3_33, 'label': ANY(lowercase )},
{'score': 0.3_33, 'label': ANY(lowercase )},
{'score': 0.3_33, 'label': ANY(lowercase )},
],
] , )
@slow
@require_torch
def A_ ( self ):
_lowerCamelCase : Tuple = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
_lowerCamelCase : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCamelCase : Optional[Any] = image_classifier(lowercase , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(lowercase ) , [
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
] , )
_lowerCamelCase : Dict = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(lowercase ) , [
[
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def A_ ( self ):
_lowerCamelCase : List[str] = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf' )
# This is an image of 2 cats with remotes and no planes
_lowerCamelCase : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCamelCase : Tuple = image_classifier(lowercase , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(lowercase ) , [
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
] , )
_lowerCamelCase : Dict = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(lowercase ) , [
[
{'score': 0.5_11, 'label': 'remote'},
{'score': 0.4_85, 'label': 'cat'},
{'score': 0.0_04, 'label': 'plane'},
],
]
* 5 , ) | 492 | 0 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
_lowerCAmelCase : str =(
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
_lowerCAmelCase : list[int] =[ord(letter) for letter in string.ascii_lowercase]
_lowerCAmelCase : set[int] ={ord(char) for char in VALID_CHARS}
_lowerCAmelCase : list[str] =["the", "be", "to", "of", "and", "in", "that", "have"]
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: str = ""
UpperCAmelCase__: int
UpperCAmelCase__: int
UpperCAmelCase__: int
for keychar, cipherchar in zip(cycle(SCREAMING_SNAKE_CASE ) ,SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: List[Any] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(SCREAMING_SNAKE_CASE )
return decoded
def _A ( SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: list[str] = []
for key in product(SCREAMING_SNAKE_CASE ,repeat=3 ):
UpperCAmelCase__: Optional[int] = try_key(SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE )
if encoded is not None:
possibles.append(SCREAMING_SNAKE_CASE )
return possibles
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ):
return [possible for possible in possibles if common_word in possible.lower()]
def _A ( SCREAMING_SNAKE_CASE = "p059_cipher.txt" ):
UpperCAmelCase__: list[int]
UpperCAmelCase__: list[str]
UpperCAmelCase__: str
UpperCAmelCase__: str
UpperCAmelCase__: str = Path(SCREAMING_SNAKE_CASE ).parent.joinpath(SCREAMING_SNAKE_CASE ).read_text(encoding="utf-8" )
UpperCAmelCase__: int = [int(SCREAMING_SNAKE_CASE ) for number in data.strip().split("," )]
UpperCAmelCase__: int = filter_valid_chars(SCREAMING_SNAKE_CASE )
for common_word in COMMON_WORDS:
UpperCAmelCase__: int = filter_common_word(SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) == 1:
break
UpperCAmelCase__: List[str] = possibles[0]
return sum(ord(SCREAMING_SNAKE_CASE ) for char in decoded_text )
if __name__ == "__main__":
print(F"{solution() = }") | 113 |
def _A ( SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: Tuple = int(SCREAMING_SNAKE_CASE )
if decimal in (0, 1): # Exit cases for the recursion
return str(SCREAMING_SNAKE_CASE )
UpperCAmelCase__ , UpperCAmelCase__: Union[str, Any] = divmod(SCREAMING_SNAKE_CASE ,2 )
return binary_recursive(SCREAMING_SNAKE_CASE ) + str(SCREAMING_SNAKE_CASE )
def _A ( SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: List[str] = str(SCREAMING_SNAKE_CASE ).strip()
if not number:
raise ValueError("No input value was provided" )
UpperCAmelCase__: Tuple = "-" if number.startswith("-" ) else ""
UpperCAmelCase__: Any = number.lstrip("-" )
if not number.isnumeric():
raise ValueError("Input value is not an integer" )
return f"{negative}0b{binary_recursive(int(SCREAMING_SNAKE_CASE ) )}"
if __name__ == "__main__":
from doctest import testmod
testmod() | 113 | 1 |
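# A hedged restatement of the recursive conversion above with clear names:
# peel off the lowest bit with divmod and recurse on the remaining quotient.
def _to_binary(n):
    if n in (0, 1):
        return str(n)
    div, mod = divmod(n, 2)
    return _to_binary(div) + str(mod)
print(_to_binary(11))  # -> "1011"
print(f"-0b{_to_binary(11)}")  # the sign is handled as a prefix, as in the code above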
"""simple docstring"""
import math
def lowercase ( lowerCAmelCase__ : int ) -> list[int]:
__a = []
__a = 2
__a = int(math.sqrt(lowerCAmelCase__ ) ) # Size of every segment
__a = [True] * (end + 1)
__a = []
while start <= end:
if temp[start] is True:
in_prime.append(lowerCAmelCase__ )
for i in range(start * start , end + 1 , lowerCAmelCase__ ):
__a = False
start += 1
prime += in_prime
__a = end + 1
__a = min(2 * end , lowerCAmelCase__ )
while low <= n:
__a = [True] * (high - low + 1)
for each in in_prime:
__a = math.floor(low / each ) * each
if t < low:
t += each
for j in range(lowerCAmelCase__ , high + 1 , lowerCAmelCase__ ):
__a = False
for j in range(len(lowerCAmelCase__ ) ):
if temp[j] is True:
prime.append(j + low )
__a = high + 1
__a = min(high + end , lowerCAmelCase__ )
return prime
print(sieve(1_0**6))
| 65 |
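# A hedged sanity check for the segmented sieve above: for small n it must
# agree with a plain Sieve of Eratosthenes, sketched here for comparison.
def _simple_sieve(n):
    flags = [True] * (n + 1)
    flags[0] = flags[1] = False
    for p in range(2, int(n**0.5) + 1):
        if flags[p]:
            for m in range(p * p, n + 1, p):
                flags[m] = False
    return [i for i, ok in enumerate(flags) if ok]
assert _simple_sieve(100)[:5] == [2, 3, 5, 7, 11]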
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowercase ( lowerCAmelCase__ : Optional[int] ) -> int:
monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() )
@pytest.fixture
def lowercase ( lowerCAmelCase__ : Any ) -> Any:
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a ):
__a = metric_id
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : Any = [MetricMock(__SCREAMING_SNAKE_CASE ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
def __UpperCAmelCase ( self ):
return self._metrics
monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() )
@pytest.mark.parametrize(
'''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple ) -> Optional[int]:
if "tmp_path" in args:
__a = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
with pytest.warns(lowerCAmelCase__ , match='''https://huggingface.co/docs/evaluate''' ):
func(*lowerCAmelCase__ )
| 65 | 1 |
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ : Union[str, Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class lowerCamelCase__ ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ = XLMProphetNetTokenizer
UpperCamelCase__ = False
UpperCamelCase__ = True
def lowerCAmelCase_ ( self : List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
a__ = XLMProphetNetTokenizer(a__ ,keep_accents=a__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase_ ( self : int ):
a__ = "[PAD]"
a__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) ,a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) ,a__ )
def lowerCAmelCase_ ( self : List[str] ):
a__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"[PAD]" )
self.assertEqual(vocab_keys[1] ,"[CLS]" )
self.assertEqual(vocab_keys[-1] ,"j" )
self.assertEqual(len(a__ ) ,10_12 )
def lowerCAmelCase_ ( self : Union[str, Any] ):
self.assertEqual(self.get_tokenizer().vocab_size ,10_12 )
def lowerCAmelCase_ ( self : List[Any] ):
a__ = XLMProphetNetTokenizer(a__ ,keep_accents=a__ )
a__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(a__ ,["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) ,[value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] ,)
a__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a__ ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] ,)
a__ = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] ,)
a__ = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(
a__ ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] ,)
@cached_property
def lowerCAmelCase_ ( self : List[str] ):
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def lowerCAmelCase_ ( self : List[Any] ):
a__ = "Hello World!"
a__ = [3_53_89, 66_72, 49, 2]
self.assertListEqual(a__ ,self.big_tokenizer.encode(a__ ) )
@slow
def lowerCAmelCase_ ( self : List[Any] ):
# fmt: off
a__ = {"input_ids": [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ ,model_name="microsoft/xprophetnet-large-wiki100-cased" ,revision="1acad1643ddd54a44df6a1b797ada8373685d90e" ,)
| 331 |
'''simple docstring'''
import sys
UpperCamelCase_ : Union[str, Any] = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def _lowerCAmelCase (_lowercase = N ):
"""simple docstring"""
a__ = -sys.maxsize - 1
for i in range(len(_lowercase ) - 12 ):
a__ = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
a__ = product
return largest_product
if __name__ == "__main__":
print(F"{solution() = }")
| 331 | 1 |
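# A hedged note on the brute-force scan above: it recomputes the 13-digit
# product at every offset. Since any window containing a zero has product 0,
# a common optimization is to split the digit string on zeros and only scan
# the zero-free chunks, sketched here on a toy digit string and window size.
def _max_window_product(digits, k):
    best = 0
    for chunk in digits.split("0"):  # a zero kills any window containing it
        for i in range(len(chunk) - k + 1):
            p = 1
            for d in chunk[i : i + k]:
                p *= int(d)
            best = max(best, p)
    return best
print(_max_window_product("73167176531330624919", 4))  # toy example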
"""simple docstring"""
import math
def _UpperCamelCase ( _A ) -> bool:
"""simple docstring"""
_UpperCAmelCase = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_A )
def _UpperCamelCase ( _A = 1 / 1_2_3_4_5 ) -> int:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 3
while True:
_UpperCAmelCase = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(_A ):
_UpperCAmelCase = int(_A )
total_partitions += 1
if check_partition_perfect(_A ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(_A )
integer += 1
if __name__ == "__main__":
print(F"{solution() = }") | 19 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        '''simple docstring'''
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        '''simple docstring'''
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        '''simple docstring'''
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        '''simple docstring'''
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        '''simple docstring'''
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self):
        '''simple docstring'''
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        '''simple docstring'''
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        '''simple docstring'''
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        '''simple docstring'''
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_try_compatible_extension_type(self):
        '''simple docstring'''
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self):
        '''simple docstring'''
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        '''simple docstring'''
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            arr = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
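# Note on the semantics exercised above: `type=` is strict (incompatible data
# raises), while `try_type=` falls back to pyarrow's inferred type (strings
# stay pa.string()) instead of raising.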
def _check_output(output, expected_num_chunks: int):
    """simple docstring"""
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}])
def test_write(fields, writer_batch_size):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
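# With writer_batch_size=1 each written example is flushed as its own record
# batch (two chunks for two writes); larger or default batch sizes buffer both
# rows into a single batch, which is what `_check_output` asserts above.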
def test_write_with_features():
    """simple docstring"""
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_writer_invalid_key(writer_batch_size):
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_key_writer_duplicate_keys(writer_batch_size):
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_key_writer_write(writer_batch_size):
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}])
def test_write_batch(fields, writer_batch_size):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}])
def test_write_table(fields, writer_batch_size):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}])
def test_write_row(fields, writer_batch_size):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_file():
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)


def get_base_dtype(arr_type):
    """simple docstring"""
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    """simple docstring"""
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    """simple docstring"""
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype


@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    """simple docstring"""
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
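# OptimizedTypedSequence downcasts well-known tokenizer columns (int8 for the
# mask columns, int32 for input_ids) and transparently falls back to int64
# once a value exceeds the optimized dtype's range, as exercised above.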
@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    """simple docstring"""
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed


def test_arrow_writer_with_filesystem(mockfs):
    """simple docstring"""
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)
def test_parquet_writer_write():
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}


@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    """simple docstring"""
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None


def test_always_nullable():
    """simple docstring"""
    # ArrowWriter normalizes the schema, dropping the nullable=False flag
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])

    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())]) | 19 | 1 |
"""simple docstring"""
class EditDistance:
    def __init__(self) -> None:
        self.worda = ""
        self.wordb = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.worda[m] == self.wordb[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, worda: str, wordb: str) -> int:
        self.worda = worda
        self.wordb = wordb
        self.dp = [[-1 for _ in range(len(wordb))] for _ in range(len(worda))]

        return self.__min_dist_top_down_dp(len(worda) - 1, len(wordb) - 1)

    def min_dist_bottom_up(self, worda: str, wordb: str) -> int:
        self.worda = worda
        self.wordb = wordb
        m = len(worda)
        n = len(wordb)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif worda[i - 1] == wordb[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    Sa = input("Enter the first string: ").strip()
    Sb = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(Sa, Sb)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sb)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
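# Worked example: both methods agree on the classic pairs, e.g.
#   EditDistance().min_dist_bottom_up("kitten", "sitting")      -> 3
#   EditDistance().min_dist_bottom_up("intention", "execution") -> 5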
| 95 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
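# Example: every node is reachable from "A" in the graph above, so
# depth_first_search(G, "A") == {"A", "B", "C", "D", "E", "F", "G"}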
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
| 515 | 0 |
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    """simple docstring"""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """simple docstring"""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """simple docstring"""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """simple docstring"""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """simple docstring"""
    return int((number & (1 << position)) != 0)
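# Worked examples (0b1101 == 13):
#   set_bit(0b1101, 1)    -> 0b1111 == 15
#   clear_bit(0b1111, 1)  -> 0b1101 == 13
#   flip_bit(0b1101, 1)   -> 0b1111 == 15
#   is_bit_set(0b1010, 1) -> True
#   get_bit(0b1101, 0)    -> 1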
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 10 | '''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly." )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(self, token_ids, skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, truncate_before_pattern: Optional[List[str]] = None, **kwargs, ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text
    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
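# Sketch of `truncate_before_pattern` at decode time (the ids are hypothetical;
# the completion is cut at the first regex match, or at a second top-level
# print/def, so single-function completions stay single-function):
#
#   tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#   text = tokenizer.decode(ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])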
| 10 | 1 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
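# e.g. prime_sieve(25) -> [2, 3, 5, 7, 11, 13, 17, 19, 23]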
| 23 |
"""simple docstring"""
def solution() -> int:
    """simple docstring"""
    return [
        a * b * (1_0_0_0 - a - b)
        for a in range(1, 9_9_9)
        for b in range(a, 9_9_9)
        if (a * a + b * b == (1_0_0_0 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(F'{solution() = }')
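# The unique Pythagorean triplet with a + b + c == 1000 is (200, 375, 425),
# so solution() evaluates to 200 * 375 * 425 == 31875000.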
| 572 | 0 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
"""A""": ["""B""", """C""", """D"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F"""],
"""D""": ["""B""", """D"""],
"""E""": ["""B""", """F"""],
"""F""": ["""C""", """E""", """G"""],
"""G""": ["""F"""],
}
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A")) | 69 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        '''simple docstring'''
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        '''simple docstring'''
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1] | 69 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs, ):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs, )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
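# Note the XLNet layout implemented above: special tokens go at the END of the
# sequence (tokens + [sep] + [cls]), with token type ids [0, ..., 0, 2] for a
# single segment, unlike the BERT-style [CLS] ... [SEP] prefix layout.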
| 94 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
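# Conceptually, local SGD lets every worker take `local_sgd_steps` optimizer
# steps on its own copy of the parameters and only then synchronize by
# averaging them across workers. A minimal sketch of that synchronization
# (illustrative only, not the accelerate implementation; assumes
# torch.distributed is initialized):
#
#   if step % local_sgd_steps == 0:
#       for param in model.parameters():
#           torch.distributed.all_reduce(param.data, op=torch.distributed.ReduceOp.AVG)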
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    '''simple docstring'''
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)')
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])

    metric = evaluate.load('glue', 'mrpc')

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps', type=int, default=1, help='The number of minibatches to be ran before gradients are accumulated.', )
    parser.add_argument(
        '--local_sgd_steps', type=int, default=8, help='Number of local SGD steps or None to disable local SGD')
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
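# Example launch, assuming this script is saved as local_sgd.py:
#   accelerate launch local_sgd.py --gradient_accumulation_steps 2 --local_sgd_steps 8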
| 188 | 0 |
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
_DESCRIPTION = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
_KWARGS_DESCRIPTION = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Comet(datasets.Metric):
    """simple docstring"""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage="""https://unbabel.github.io/COMET/html/index.html""" ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""sources""": datasets.Value("""string""" ,id="""sequence""" ),
"""predictions""": datasets.Value("""string""" ,id="""sequence""" ),
"""references""": datasets.Value("""string""" ,id="""sequence""" ),
} ) ,codebase_urls=["""https://github.com/Unbabel/COMET"""] ,reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] ,)
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))
    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 716 |
"""simple docstring"""
import argparse
import os

import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"
def load_notes_encoder(weights, model):
_lowercase : Dict = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) )
_lowercase : Any = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__UpperCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
_lowercase : str = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
        attention_weights = ly_weight["attention"]
_lowercase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_lowercase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_lowercase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_lowercase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
_lowercase : int = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
_lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
_lowercase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
_lowercase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
_lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_continuous_encoder(weights, model):
_lowercase : int = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) )
_lowercase : Optional[int] = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__UpperCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
_lowercase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_lowercase : str = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_lowercase : Any = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_lowercase : Any = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
_lowercase : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
_lowercase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
_lowercase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
_lowercase : str = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
_lowercase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
_lowercase : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_decoder(weights, model):
_lowercase : Dict = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) )
_lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) )
_lowercase : Optional[int] = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__UpperCAmelCase )
_lowercase : Optional[Any] = nn.Parameter(
torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
_lowercase : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) )
_lowercase : int = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
        attention_weights = ly_weight["self_attention"]
_lowercase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_lowercase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_lowercase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_lowercase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
_lowercase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_lowercase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_lowercase : Any = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
_lowercase : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) )
_lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
_lowercase : Any = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
_lowercase : str = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
_lowercase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
_lowercase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
_lowercase : str = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) )
_lowercase : Any = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) )
return model
def main(args):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )

    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=F'{MODEL}/checkpoint_500000',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
    args = parser.parse_args()
main(args)
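# Example invocation (hypothetical local checkpoint directory):
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 --output_path ./converted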
| 600 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class SCREAMING_SNAKE_CASE_ ( _a ):
'''simple docstring'''
lowercase : Dict = """Wav2Vec2FeatureExtractor"""
lowercase : Tuple = """AutoTokenizer"""
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f'Loading a tokenizer inside {cls.__name__} from a config that does not'
                ' include a `tokenizer_class` attribute is deprecated and will be '
                'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
                ' attribute to either your `config.json` or `tokenizer_config.json` '
                'file to suppress this warning: ', FutureWarning, )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backwards compatibility: when used inside the `as_target_processor`
        # context manager, forward everything to the current processor.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def pad(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop('input_features', None)
        labels = kwargs.pop('labels', None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
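# Illustrative usage sketch (not part of the original file): assumes the class
# above is exposed as transformers' `Wav2Vec2Processor`; the checkpoint name
# and the silent 16 kHz waveform are placeholder assumptions only.
if __name__ == "__main__":
    import numpy as np

    from transformers import Wav2Vec2Processor

    processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
    raw_waveform = np.zeros(16_000, dtype=np.float32)  # one second of "silence"
    batch = processor(audio=raw_waveform, sampling_rate=16_000, text="HELLO WORLD")
    # `input_values` comes from the feature extractor, `labels` from the tokenizer.
    print(sorted(batch.keys()))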
| 305 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
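# Illustrative sketch (not part of the original file): `_LazyModule` defers the
# heavy submodule imports above until an attribute is first accessed. The toy
# class below is a hypothetical, self-contained rendition of that idea built on
# `importlib`; it is not the real `_LazyModule` implementation.
if __name__ == "__main__":
    import importlib

    class _ToyLazyModule:
        def __init__(self, import_structure):
            # Map each exported name to the submodule that defines it.
            self._name_to_module = {
                name: module for module, names in import_structure.items() for name in names
            }

        def __getattr__(self, name):
            # The real import only happens here, on first attribute access.
            module = importlib.import_module(self._name_to_module[name])
            return getattr(module, name)

    toy = _ToyLazyModule({"json": ["dumps", "loads"]})
    print(toy.dumps({"lazy": True}))  # imports `json` only at this point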
| 89 | 0 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
def pt_to_pil(images):
    '''simple docstring'''
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    '''simple docstring'''
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
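# Illustrative usage sketch (not part of the original file): round-trips a
# random batch through the helpers above. Assumes torch is installed; shapes
# follow the (batch, channels, height, width) convention of `pt_to_pil`.
if __name__ == "__main__":
    import torch

    fake_batch = torch.rand(2, 3, 8, 8) * 2 - 1  # values in [-1, 1]
    pil_images = pt_to_pil(fake_batch)
    print(len(pil_images), pil_images[0].size)  # expected: 2 (8, 8)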
| 196 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 196 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim, depths=depths, focal_levels=focal_levels, focal_windows=focal_windows, use_conv_embed=use_conv_embed, id2label=id2label, label2id=label2id, use_post_layernorm=use_post_layernorm, use_layerscale=use_layerscale, )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
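# Illustrative check (not part of the original script): the checkpoint key
# below is a made-up example in the original FocalNet naming scheme, shown
# only to demonstrate what `rename_key` produces.
assert (
    rename_key("layers.0.blocks.0.modulation.f.weight")
    == "focalnet.encoder.stages.0.layers.0.modulation.projection_in.weight"
)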
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
"""focalnet-tiny""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth""",
"""focalnet-tiny-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth""",
"""focalnet-small""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth""",
"""focalnet-small-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth""",
"""focalnet-base""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth""",
"""focalnet-base-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth""",
"""focalnet-large-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth""",
"""focalnet-large-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth""",
"""focalnet-xlarge-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth""",
"""focalnet-xlarge-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth""",
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True, size={"shortest_edge": 256}, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=224, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f'Saving model and processor of {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f'Pushing model and processor of {model_name} to the hub...')
        model.push_to_hub(f'{model_name}')
        processor.push_to_hub(f'{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 651 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)


@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16)
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy", ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy")
        assert np.abs((expected_image - image).max()) < 5e-1
| 651 | 1 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    """simple docstring"""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    """simple docstring"""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """simple docstring"""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
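    # Illustrative extra check (not part of the original file): both helpers
    # compute the L1 (taxicab) metric, so they agree on any valid input.
    assert manhattan_distance([1.0, 1.0], [4.0, 5.0]) == 7.0  # |1-4| + |1-5|
    assert manhattan_distance_one_liner([1.0, 1.0], [4.0, 5.0]) == 7.0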
| 376 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """simple docstring"""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """simple docstring"""
        raise NotImplementedError()
| 376 | 1 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    '''simple docstring'''
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length, )
    return result


def solution(max_power: int = 9) -> int:
    '''simple docstring'''
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
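    # Illustrative extra check (not part of the original file): Project Euler
    # problem 145 states there are 120 reversible numbers below one thousand,
    # i.e. max_power=3 here.
    assert solution(3) == 120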
| 79 |
'''simple docstring'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    """simple docstring"""
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    """simple docstring"""
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
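# Illustrative usage sketch (not part of the original file): the food names,
# values and weights below are made-up placeholders.
if __name__ == "__main__":
    demo_menu = build_menu(["Burger", "Pizza", "Coca Cola"], [80, 100, 60], [40, 10, 20])
    # Greedily fill a 50-unit weight budget, best value/weight ratio first:
    # Pizza (ratio 10) and Coca Cola (ratio 3) fit, Burger (ratio 2) does not.
    taken, total_value = greedy(demo_menu, 50.0, Things.value_weight)
    print(taken, total_value)  # expected total value: 160.0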
def test_greedy():
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 44 | 0 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    '''simple docstring'''
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
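# Illustrative check (not part of the original file): pads two ragged label
# rows to length 5 with -1 on the right.
assert padding_tensor([[1, 2, 3], [4]], -1, "right", 5) == [
    [1, 2, 3, -1, -1],
    [4, -1, -1, -1, -1],
]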
def is_punctuation(char):
    '''simple docstring'''
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    '''simple docstring'''

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt" if labels is None else None, )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
| 701 |
def xnor_gate(input_1: int, input_2: int) -> int:
    '''simple docstring'''
    return 1 if input_1 == input_2 else 0
def test_xnor_gate() -> None:
'''simple docstring'''
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 452 | 0 |
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 542 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """simple docstring"""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order, ):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f'part_id = {partition_id}').drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f'{partition_id}_{row_id}', row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    """simple docstring"""

    def __init__(self, df, partition_order=None, ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id, num_workers) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    """simple docstring"""

    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df, cache_dir=None, working_dir=None, **config_kwargs, ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs, )
    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir")
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath, file_format, max_shard_size, ):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"], )
            shard_id = 0
            writer = writer_class(
                features=features, path=working_fpath.replace("SSSSS", f'{shard_id:05d}').replace("TTTTT", f'{task_id:05d}'), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features, path=working_fpath.replace("SSSSS", f'{shard_id:05d}').replace("TTTTT", f'{task_id:05d}'), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"), pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"), pyspark.sql.functions.count("num_bytes").alias("num_shards"), pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"), )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(self, split_generator, file_format="arrow", max_shard_size=None, num_proc=None, **kwargs, ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f'Renaming {total_shards} shards.')
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id, shard_id, global_shard_id, ):
                rename(
                    fs, fpath.replace("SSSSS", f'{shard_id:05d}').replace("TTTTT", f'{task_id:05d}'), fpath.replace("TTTTT-SSSSS", f'{global_shard_id:05d}').replace("NNNNN", f'{total_shards:05d}'), )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f'{shard_id:05d}').replace("TTTTT", f'{task_id:05d}'), fpath.replace(SUFFIX, ""), )

    def _get_examples_iterable_for_split(self, split_generator, ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
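# Illustrative usage sketch (not part of the original file): the builder above
# backs `datasets.Dataset.from_spark`. Running this needs a live PySpark
# session; the toy DataFrame is a placeholder.
if __name__ == "__main__":
    from pyspark.sql import SparkSession

    from datasets import Dataset

    spark_session = SparkSession.builder.master("local[*]").getOrCreate()
    toy_df = spark_session.createDataFrame([("hello",), ("world",)], schema="text string")
    print(Dataset.from_spark(toy_df))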
| 542 | 1 |
def match_pattern(input_string: str, pattern: str) -> bool:
    """simple docstring"""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
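# Worked example (not part of the original file): for input_string="aab" and
# pattern="c*a*b", "c*" matches zero 'c's, "a*" matches "aa" and 'b' matches
# 'b', so dp[-1][-1] is 1 and the function returns True.
assert match_pattern("aab", "c*a*b") is True
assert match_pattern("aab", "c*a") is False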
if __name__ == "__main__":
import doctest
doctest.testmod()
    # inputting the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"""{input_string} matches the given pattern {pattern}""")
    else:
        print(f"""{input_string} does not match with the given pattern {pattern}""")
| 715 |
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f'masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f'encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f'encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, orig_shape):
        full_name = f'encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(orig_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(f'Loading model based on config from {config_path}...')
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape)
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape)
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape)
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape)
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape)
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape)

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape)
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape)

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow Token Dropping checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model.',
)
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 182 | 0 |
'''simple docstring'''
import requests
_NEWS_API = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    '''simple docstring'''
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
print(f"{i}.) {article['title']}" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
| 22 | import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
A : Dict = threading.Lock()
A : Optional[logging.Handler] = None
A : Optional[int] = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
A : List[str] = logging.WARNING
A : str = True
def a__ ( ):
SCREAMING_SNAKE_CASE_ = os.getenv("TRANSFORMERS_VERBOSITY" , __UpperCamelCase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
F'''has to be one of: { ", ".join(log_levels.keys() ) }''' )
return _default_log_level
def a__ ( ):
return __name__.split("." )[0]
def a__ ( ):
return logging.getLogger(_get_library_name() )
def a__ ( ):
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
SCREAMING_SNAKE_CASE_ = logging.StreamHandler() # Set sys.stderr as stream.
SCREAMING_SNAKE_CASE_ = sys.stderr.flush
# Apply our default configuration to the library root logger.
SCREAMING_SNAKE_CASE_ = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
SCREAMING_SNAKE_CASE_ = False
def a__ ( ):
global _default_handler
with _lock:
if not _default_handler:
return
SCREAMING_SNAKE_CASE_ = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
SCREAMING_SNAKE_CASE_ = None
def a__ ( ):
return log_levels
def a__ ( __UpperCamelCase = None ):
if name is None:
SCREAMING_SNAKE_CASE_ = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(__UpperCamelCase )
def a__ ( ):
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def a__ ( __UpperCamelCase ):
_configure_library_root_logger()
_get_library_root_logger().setLevel(__UpperCamelCase )
def a__ ( ):
return set_verbosity(__UpperCamelCase )
def a__ ( ):
return set_verbosity(__UpperCamelCase )
def a__ ( ):
return set_verbosity(__UpperCamelCase )
def a__ ( ):
return set_verbosity(__UpperCamelCase )
def a__ ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def a__ ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def a__ ( __UpperCamelCase ):
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(__UpperCamelCase )
def a__ ( __UpperCamelCase ):
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(__UpperCamelCase )
def a__ ( ):
_configure_library_root_logger()
SCREAMING_SNAKE_CASE_ = False
def a__ ( ):
_configure_library_root_logger()
SCREAMING_SNAKE_CASE_ = True
def a__ ( ):
SCREAMING_SNAKE_CASE_ = _get_library_root_logger().handlers
for handler in handlers:
SCREAMING_SNAKE_CASE_ = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
handler.setFormatter(__UpperCamelCase )
def a__ ( ):
SCREAMING_SNAKE_CASE_ = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(__UpperCamelCase )
def a__ ( self , *__UpperCamelCase , **__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , __UpperCamelCase )
if no_advisory_warnings:
return
self.warning(*__UpperCamelCase , **__UpperCamelCase )
A : Dict = warning_advice
@functools.lru_cache(__UpperCamelCase )
def a__ ( self , *__UpperCamelCase , **__UpperCamelCase ):
self.warning(*__UpperCamelCase , **__UpperCamelCase )
A : Union[str, Any] = warning_once
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , *__magic_name__ : Optional[int] , **__magic_name__ : int ) -> int: # pylint: disable=unused-argument
SCREAMING_SNAKE_CASE_ = args[0] if args else None
def __iter__( self : str ) -> Optional[int]:
return iter(self._iterator )
def __getattr__( self : Dict , __magic_name__ : int ) -> Optional[int]:
def empty_fn(*__magic_name__ : Tuple , **__magic_name__ : Optional[Any] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Any ) -> Dict:
return self
def __exit__( self : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : List[str] ) -> List[str]:
return
class lowerCamelCase :
"""simple docstring"""
def __call__( self : List[Any] , *__magic_name__ : List[str] , **__magic_name__ : Tuple ) -> Union[str, Any]:
if _tqdm_active:
return tqdm_lib.tqdm(*__magic_name__ , **__magic_name__ )
else:
return EmptyTqdm(*__magic_name__ , **__magic_name__ )
def __A ( self : Tuple , *__magic_name__ : List[Any] , **__magic_name__ : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__magic_name__ , **__magic_name__ )
def __A ( self : Optional[Any] ) -> Dict:
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
A : Optional[int] = _tqdm_cls()
def a__ ( ):
global _tqdm_active
return bool(_tqdm_active )
def a__ ( ):
global _tqdm_active
SCREAMING_SNAKE_CASE_ = True
hf_hub_utils.enable_progress_bars()
def a__ ( ):
global _tqdm_active
SCREAMING_SNAKE_CASE_ = False
hf_hub_utils.disable_progress_bars()
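# Minimal usage sketch for this module. The helper names below are the
# unobfuscated upstream ones (set_verbosity / get_logger / warning_once), and
# the import path is an assumption:
#   from transformers.utils import logging
#   logging.set_verbosity(logging.INFO)   # or export TRANSFORMERS_VERBOSITY=info
#   logger = logging.get_logger(__name__)
#   logger.warning_once("emitted at most once per unique message")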
| 140 | 0 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
lowerCAmelCase_ : str = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
lowerCAmelCase_ : Tuple = {
'abeja/gpt-neox-japanese-2.7b': 20_48,
}
def _lowerCamelCase ( lowercase : Any , lowercase : Optional[Any] ) -> Optional[Any]:
with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f:
_a = json.loads(f.read() )
_a = collections.OrderedDict()
_a = collections.OrderedDict()
_a = collections.OrderedDict()
with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f:
_a = f.readlines()
_a = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(_lowerCAmelCase ):
_a = b
_a = idx
for wd in b:
_a = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class __SCREAMING_SNAKE_CASE (__UpperCAmelCase ):
"""simple docstring"""
__a =VOCAB_FILES_NAMES
__a =PRETRAINED_VOCAB_FILES_MAP
__a =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a =["input_ids", "attention_mask"]
def __init__( self : Tuple , __a : Any , __a : Union[str, Any] , __a : Any="<|endoftext|>" , __a : Dict="<|endoftext|>" , __a : Optional[Any]="<|startoftext|>" , __a : Optional[Any]="<|endoftext|>" , __a : List[Any]=False , **__a : Tuple , ):
super().__init__(
unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , do_clean_text=_lowerCamelCase , **_lowerCamelCase , )
if not os.path.isfile(_lowerCamelCase ):
raise ValueError(
f'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
" model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(_lowerCamelCase ):
raise ValueError(
f'Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
" pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
_a = do_clean_text
_a , _a , _a , _a = load_vocab_and_emoji(_lowerCamelCase , _lowerCamelCase )
_a = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def UpperCamelCase__ ( self : Union[str, Any] ):
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def UpperCamelCase__ ( self : Optional[int] ):
return dict(self.raw_vocab , **self.added_tokens_encoder )
def UpperCamelCase__ ( self : List[Any] , __a : int ):
return self.subword_tokenizer.tokenize(_lowerCamelCase , clean=self.do_clean_text )
def UpperCamelCase__ ( self : Optional[int] , __a : Optional[Any] ):
return self.vocab.get(_lowerCamelCase , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self : Tuple , __a : str ):
return self.subword_tokenizer.convert_id_to_token(_lowerCamelCase )
def UpperCamelCase__ ( self : Dict , __a : Optional[Any] ):
_a = "".join(_lowerCamelCase ).strip()
return out_string
def UpperCamelCase__ ( self : Dict , __a : "Conversation" ):
_a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) + [self.eos_token_id] )
if len(_lowerCamelCase ) > self.model_max_length:
_a = input_ids[-self.model_max_length :]
return input_ids
def UpperCamelCase__ ( self : Union[str, Any] , __a : str , __a : Optional[str] = None ):
_a = 0
if os.path.isdir(_lowerCamelCase ):
_a = os.path.join(
_lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
_a = os.path.join(
_lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
_a = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
_a = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
" Please check that the vocabulary is not corrupted!" )
_a = token_index
writer.write(",".join(_lowerCamelCase ) + "\n" )
index += 1
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , _lowerCamelCase )
return vocab_file, emoji_file
class __SCREAMING_SNAKE_CASE (__UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Dict , __a : Any , __a : Optional[int] , __a : Optional[int] ):
_a = vocab # same as swe
_a = ids_to_tokens # same as bpe
_a = emoji
_a = np.max([len(_lowerCamelCase ) for w in self.vocab.keys()] )
_a = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
_a = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
_a = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
_a = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
_a = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
_a = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
_a = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
_a = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
_a = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self : Optional[int] ):
return len(self.ids_to_tokens )
def UpperCamelCase__ ( self : Dict , __a : Tuple ):
_a = self.content_repattera.sub("<URL>" , _lowerCamelCase )
_a = self.content_repattera.sub("<EMAIL>" , _lowerCamelCase )
_a = self.content_repattera.sub("<TEL>" , _lowerCamelCase )
_a = self.content_repattera.sub("<DATE>" , _lowerCamelCase )
_a = self.content_repattera.sub("<DATE>" , _lowerCamelCase )
_a = self.content_repattera.sub("<PRICE>" , _lowerCamelCase )
_a = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
_a = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
return content
def UpperCamelCase__ ( self : Union[str, Any] , __a : Dict , __a : Any=False ):
_a = text.replace(" " , "<SP>" )
_a = text.replace(" " , "<SP>" )
_a = text.replace("\r\n" , "<BR>" )
_a = text.replace("\n" , "<BR>" )
_a = text.replace("\r" , "<BR>" )
_a = text.replace("\t" , "<TAB>" )
_a = text.replace("—" , "ー" )
_a = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
_a = text.replace(_lowerCamelCase , _lowerCamelCase )
if clean:
_a = self.clean_text(_lowerCamelCase )
def check_simbol(__a : List[str] ):
_a = x.encode()
if len(_lowerCamelCase ) == 1 and len(_lowerCamelCase ) == 2:
_a = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC2_A1 and c <= 0XC2_BF)
or (c >= 0XC7_80 and c <= 0XC7_83)
or (c >= 0XCA_B9 and c <= 0XCB_BF)
or (c >= 0XCC_80 and c <= 0XCD_A2)
):
return True
return False
def checkuae(__a : Optional[Any] ):
_a = x.encode()
if len(_lowerCamelCase ) == 1 and len(_lowerCamelCase ) == 3:
_a = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE2_80_80 and c <= 0XE2_B0_7F:
return True
return False
_a = 0
_a = []
while pos < len(_lowerCamelCase ):
_a = min(len(_lowerCamelCase ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
_a = [] # (token_id, token, pos)
for e in range(_lowerCamelCase , _lowerCamelCase , -1 ):
_a = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(_lowerCamelCase ) > 2:
_a = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(_lowerCamelCase ) > 0:
# the smallest token_id is adopted
_a , _a , _a = sorted(_lowerCamelCase , key=lambda __a : x[0] )[0]
result.append(_lowerCamelCase )
_a = e
else:
_a = pos + 1
_a = text[pos:end]
if check_simbol(_lowerCamelCase ):
result.append("<KIGOU>" )
elif checkuae(_lowerCamelCase ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
_a = end
return result
def UpperCamelCase__ ( self : Optional[int] , __a : Union[str, Any] , __a : List[Any]="\n" ):
_a = []
_a = []
_a = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(_lowerCamelCase ) > 0:
words.append(bytearray(_lowerCamelCase ).decode("utf-8" , errors="replace" ) )
_a = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(_lowerCamelCase )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
words.append(bytearray(_lowerCamelCase ).decode("utf-8" , errors="replace" ) )
_a = "".join(_lowerCamelCase )
return text
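# Rough usage sketch. Upstream this tokenizer is GPTNeoXJapaneseTokenizer and
# the checkpoint name comes from the pretrained maps above; treat both as
# assumptions:
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("こんにちは、世界")["input_ids"]
#   text = tokenizer.decode(ids)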
| 701 |
'''simple docstring'''
import math
import sys
def _lowerCamelCase ( lowercase : str ) -> str:
_a = ""
try:
with open(lowercase , "rb" ) as binary_file:
_a = binary_file.read()
for dat in data:
_a = F'{dat:08b}'
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def _lowerCamelCase ( lowercase : str ) -> str:
_a = {"0": "0", "1": "1"}
_a , _a = "", ""
_a = len(lowercase )
for i in range(len(lowercase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
_a = lexicon[curr_string]
result += last_match_id
_a = last_match_id + "0"
if math.log2(index ).is_integer():
_a = {}
for curr_key in list(lowercase ):
_a = lexicon.pop(lowercase )
_a = new_lex
_a = last_match_id + "1"
index += 1
_a = ""
return result
def _lowerCamelCase ( lowercase : str , lowercase : str ) -> None:
_a = 8
try:
with open(lowercase , "wb" ) as opened_file:
_a = [
to_write[i : i + byte_length]
for i in range(0 , len(lowercase ) , lowercase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(lowercase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def _lowerCamelCase ( lowercase : str ) -> str:
_a = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
_a = data_bits[counter:]
_a = data_bits[counter + 1 :]
return data_bits
def _lowerCamelCase ( lowercase : str , lowercase : str ) -> None:
_a = read_file_binary(lowercase )
_a = remove_prefix(lowercase )
_a = decompress_data(lowercase )
write_file_binary(lowercase , lowercase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
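# Illustrative CLI usage (file names are placeholders). Note that despite the
# entry point being called `compress`, the pipeline above reads an LZW-compressed
# file, strips the length prefix, decompresses the bit stream, and writes the
# result:
#   python decompress.py compressed.bin restored.bin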
| 521 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = 'Hello, World!'
__lowerCAmelCase = 'en_XX'
def _UpperCAmelCase ( __A : str , __A : str , __A : bool ):
a_ : str = Path('''data_bin''' )
a_ : Optional[int] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(__A ).parent ) , checkpoint_file=Path(__A ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(__A ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(__A ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
xmod.eval() # disable dropout
print(__A )
a_ : Any = xmod.model.encoder.sentence_encoder
a_ : Optional[Any] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
a_ : Optional[int] = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , __A )
a_ : Optional[int] = XmodForSequenceClassification(__A ) if classification_head else XmodForMaskedLM(__A )
model.eval()
# Now let's copy all the weights.
# Embeddings
a_ : Dict = xmod_sent_encoder.embed_tokens.weight
a_ : Any = xmod_sent_encoder.embed_positions.weight
a_ : Any = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
a_ : Optional[int] = xmod_sent_encoder.layernorm_embedding.weight
a_ : Tuple = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
a_ : Union[str, Any] = model.roberta.encoder.layer[i]
a_ : Optional[Any] = xmod_sent_encoder.layers[i]
# self attention
a_ : Union[str, Any] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
a_ : int = xmod_layer.self_attn.q_proj.weight
a_ : List[str] = xmod_layer.self_attn.q_proj.bias
a_ : List[Any] = xmod_layer.self_attn.k_proj.weight
a_ : List[str] = xmod_layer.self_attn.k_proj.bias
a_ : Tuple = xmod_layer.self_attn.v_proj.weight
a_ : List[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
a_ : int = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
a_ : List[Any] = xmod_layer.self_attn.out_proj.weight
a_ : Optional[int] = xmod_layer.self_attn.out_proj.bias
a_ : str = xmod_layer.self_attn_layer_norm.weight
a_ : int = xmod_layer.self_attn_layer_norm.bias
# intermediate
a_ : int = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
a_ : Any = xmod_layer.fca.weight
a_ : List[Any] = xmod_layer.fca.bias
# output
a_ : Union[str, Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
a_ : Optional[Any] = xmod_layer.fca.weight
a_ : Optional[Any] = xmod_layer.fca.bias
a_ : Union[str, Any] = xmod_layer.final_layer_norm.weight
a_ : Tuple = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
a_ : str = xmod_layer.adapter_layer_norm.weight
a_ : List[str] = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
a_ : Any = bert_output.adapter_modules[lang_code]
a_ : List[str] = xmod_layer.adapter_modules[lang_code]
a_ : Tuple = from_adapter.fca.weight
a_ : List[str] = from_adapter.fca.bias
a_ : List[str] = from_adapter.fca.weight
a_ : str = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
a_ : int = xmod_sent_encoder.layer_norm.weight
a_ : Optional[int] = xmod_sent_encoder.layer_norm.bias
if classification_head:
a_ : Optional[Any] = xmod.model.classification_heads['''mnli'''].dense.weight
a_ : List[Any] = xmod.model.classification_heads['''mnli'''].dense.bias
a_ : Optional[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
a_ : List[str] = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
a_ : List[str] = xmod.model.encoder.lm_head.dense.weight
a_ : List[str] = xmod.model.encoder.lm_head.dense.bias
a_ : Optional[int] = xmod.model.encoder.lm_head.layer_norm.weight
a_ : int = xmod.model.encoder.lm_head.layer_norm.bias
a_ : Optional[int] = xmod.model.encoder.lm_head.weight
a_ : Tuple = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
a_ : List[Any] = xmod.encode(__A ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(__A )
a_ : List[str] = model(__A )[0]
if classification_head:
a_ : Dict = xmod.model.classification_heads['''mnli'''](xmod.extract_features(__A ) )
else:
a_ : Dict = xmod.model(__A , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
a_ : Any = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
a_ : Optional[Any] = torch.allclose(__A , __A , atol=1E-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
Path(__A ).mkdir(parents=__A , exist_ok=__A )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(__A )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
__lowerCAmelCase = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
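# Illustrative CLI invocation (script name and paths are placeholders):
#   python convert_xmod_checkpoint.py \
#       --xmod_checkpoint_path /path/to/model.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --classification_head   # only when converting an MNLI classification head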
| 466 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case__ = ViTImageProcessor if is_vision_available() else None
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
a_ : Optional[Any] = (3, 32, 128)
a_ : Optional[Any] = tempfile.mkdtemp()
# fmt: off
a_ : Union[str, Any] = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
a_ : List[Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
a_ : Union[str, Any] = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
a_ : Any = os.path.join(self.tmpdirname , __SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[Any] , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : str ) -> str:
a_ : Optional[Any] = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
a_ : Optional[int] = Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) )
return image_input
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
a_ : str = self.get_tokenizer()
a_ : List[Any] = self.get_image_processor()
a_ : Union[str, Any] = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
a_ : Tuple = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
a_ : Any = self.get_tokenizer()
a_ : Dict = self.get_image_processor()
a_ : Optional[Any] = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
a_ : str = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
a_ : str = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
a_ : Optional[int] = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
a_ : Dict = self.get_image_processor()
a_ : Tuple = self.get_tokenizer()
a_ : Any = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = self.prepare_image_inputs()
a_ : List[Any] = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
a_ : int = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
a_ : Dict = self.get_image_processor()
a_ : List[Any] = self.get_tokenizer()
a_ : int = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
a_ : Tuple = '''test'''
a_ : Any = processor(text=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer(__SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
a_ : Union[str, Any] = self.get_image_processor()
a_ : Tuple = self.get_tokenizer()
a_ : Optional[Any] = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = '''test'''
a_ : str = self.prepare_image_inputs()
a_ : Tuple = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE ):
processor()
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
a_ : int = self.get_image_processor()
a_ : List[Any] = self.get_tokenizer()
a_ : Dict = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
a_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
a_ : List[Any] = processor.char_decode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
a_ : int = self.get_image_processor()
a_ : Tuple = self.get_tokenizer()
a_ : Any = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
a_ : Dict = None
a_ : Tuple = self.prepare_image_inputs()
a_ : Optional[int] = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
a_ : List[str] = self.get_image_processor()
a_ : str = self.get_tokenizer()
a_ : Any = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
a_ : str = torch.randn(1 , 27 , 38 )
a_ : int = torch.randn(1 , 27 , 5_0257 )
a_ : Union[str, Any] = torch.randn(1 , 27 , 3_0522 )
a_ : Tuple = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 466 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase ( UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any , UpperCamelCase_: List[str] ) -> Tuple:
'''simple docstring'''
_a = LxmertConfig.from_json_file(lowercase__ )
print(f'''Building PyTorch model from configuration: {config}''' )
_a = LxmertForPreTraining(lowercase__ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , lowercase__ )
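# Illustrative CLI invocation (script name and paths are placeholders):
#   python convert_lxmert_checkpoint.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin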
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) | 702 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
UpperCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def lowerCAmelCase ( UpperCamelCase_: str ) -> Union[str, Any]:
'''simple docstring'''
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
_a = model_type_to_module_name(UpperCamelCase_ )
_a = importlib.import_module(f'''.{module_name}''' , "transformers.models" )
try:
return getattr(UpperCamelCase_ , UpperCamelCase_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(UpperCamelCase_ , "__name__" , UpperCamelCase_ ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_a = importlib.import_module("transformers" )
if hasattr(UpperCamelCase_ , UpperCamelCase_ ):
return getattr(UpperCamelCase_ , UpperCamelCase_ )
return None
def lowerCAmelCase ( UpperCamelCase_: Union[str, os.PathLike] , UpperCamelCase_: Optional[Union[str, os.PathLike]] = None , UpperCamelCase_: bool = False , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[Dict[str, str]] = None , UpperCamelCase_: Optional[Union[bool, str]] = None , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: bool = False , **UpperCamelCase_: Dict , ) -> Optional[int]:
'''simple docstring'''
_a = get_file_from_repo(
UpperCamelCase_ , UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , resume_download=UpperCamelCase_ , proxies=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , revision=UpperCamelCase_ , local_files_only=UpperCamelCase_ , )
if resolved_config_file is None:
logger.info(
"Could not locate the image processor configuration file, will try to use the model config instead." )
return {}
with open(UpperCamelCase_ , encoding="utf-8" ) as reader:
return json.load(UpperCamelCase_ )
class lowercase_ :
def __init__( self ) ->List[Any]:
'''simple docstring'''
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(a_ )
def lowerCamelCase__ ( cls , a_ , **a_ ) ->Dict:
'''simple docstring'''
_a = kwargs.pop("config" , a_ )
_a = kwargs.pop("trust_remote_code" , a_ )
_a = True
_a , _a = ImageProcessingMixin.get_image_processor_dict(a_ , **a_ )
_a = config_dict.get("image_processor_type" , a_ )
_a = None
if "AutoImageProcessor" in config_dict.get("auto_map" , {} ):
_a = config_dict["auto_map"]["AutoImageProcessor"]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_a = config_dict.pop("feature_extractor_type" , a_ )
if feature_extractor_class is not None:
logger.warning(
"Could not find image processor class in the image processor config or the model config. Loading"
" based on pattern matching with the model's feature extractor configuration." )
_a = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" )
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
_a = config_dict["auto_map"]["AutoFeatureExtractor"]
_a = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" )
logger.warning(
"Could not find image processor auto map in the image processor config or the model config."
" Loading based on pattern matching with the model's feature extractor configuration." )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(a_ , a_ ):
_a = AutoConfig.from_pretrained(a_ , **a_ )
# It could be in `config.image_processor_type``
_a = getattr(a_ , "image_processor_type" , a_ )
if hasattr(a_ , "auto_map" ) and "AutoImageProcessor" in config.auto_map:
_a = config.auto_map["AutoImageProcessor"]
if image_processor_class is not None:
_a = image_processor_class_from_name(a_ )
_a = image_processor_auto_map is not None
_a = image_processor_class is not None or type(a_ ) in IMAGE_PROCESSOR_MAPPING
_a = resolve_trust_remote_code(
a_ , a_ , a_ , a_ )
if has_remote_code and trust_remote_code:
_a = get_class_from_dynamic_module(
a_ , a_ , **a_ )
_a = kwargs.pop("code_revision" , a_ )
if os.path.isdir(a_ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(a_ , **a_ )
elif image_processor_class is not None:
return image_processor_class.from_dict(a_ , **a_ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(a_ ) in IMAGE_PROCESSOR_MAPPING:
_a = IMAGE_PROCESSOR_MAPPING[type(a_ )]
return image_processor_class.from_dict(a_ , **a_ )
raise ValueError(
f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def lowerCamelCase__ ( a_ , a_ ) ->Optional[int]:
'''simple docstring'''
IMAGE_PROCESSOR_MAPPING.register(a_ , a_ )
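# Minimal usage sketch for the auto class above (the checkpoint is just one
# example; any repo with an image processor or feature extractor config works):
#   processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   inputs = processor(images=pil_image, return_tensors="pt")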
| 612 | 0 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class a ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self : Tuple , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Any ) -> int:
"""simple docstring"""
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
requires_backends(self , '''vision''' )
self.check_model_type(lowerCamelCase__ )
def __call__( self : Optional[Any] , lowerCamelCase__ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCamelCase__ : str ) -> str:
"""simple docstring"""
return super().__call__(lowerCamelCase__ , **lowerCamelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] , **lowerCamelCase__ : Dict ) -> Optional[int]:
"""simple docstring"""
return {}, {}, {}
def UpperCAmelCase_ ( self : Union[str, Any] , lowerCamelCase__ : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = load_image(lowerCamelCase__ )
__lowercase = image.size
__lowercase = self.image_processor(images=lowerCamelCase__ , return_tensors=self.framework )
return model_inputs
def UpperCAmelCase_ ( self : Optional[Any] , lowerCamelCase__ : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = self.model(**lowerCamelCase__ )
return model_outputs
def UpperCAmelCase_ ( self : Optional[int] , lowerCamelCase__ : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = model_outputs.predicted_depth
__lowercase = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=lowerCamelCase__ )
__lowercase = prediction.squeeze().cpu().numpy()
__lowercase = (output * 255 / np.max(lowerCamelCase__ )).astype('''uint8''' )
__lowercase = Image.fromarray(lowerCamelCase__ )
__lowercase = {}
__lowercase = predicted_depth
__lowercase = depth
return output_dict
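# Hedged usage sketch via the high-level pipeline API; the task alias and
# checkpoint are assumptions based on the standard depth-estimation setup:
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("photo.jpg")   # keys: "predicted_depth", "depth"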
| 332 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class a ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase_ : Tuple = 'vit_msn'
def __init__( self : List[Any] , lowerCamelCase__ : Any=768 , lowerCamelCase__ : Dict=12 , lowerCamelCase__ : str=12 , lowerCamelCase__ : Union[str, Any]=3_072 , lowerCamelCase__ : str="gelu" , lowerCamelCase__ : Optional[int]=0.0 , lowerCamelCase__ : Optional[int]=0.0 , lowerCamelCase__ : Optional[int]=0.0_2 , lowerCamelCase__ : List[Any]=1e-0_6 , lowerCamelCase__ : Tuple=224 , lowerCamelCase__ : Optional[Any]=16 , lowerCamelCase__ : Union[str, Any]=3 , lowerCamelCase__ : int=True , **lowerCamelCase__ : Optional[int] , ) -> int:
"""simple docstring"""
super().__init__(**lowerCamelCase__ )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = qkv_bias
| 332 | 1 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class UpperCamelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = pad_token_id
_lowerCAmelCase : List[Any] = max_length
_lowerCAmelCase : Tuple = vocab
_lowerCAmelCase : str = merges
_lowerCAmelCase : List[str] = BytePairTokenizer(snake_case__ , snake_case__ , sequence_length=snake_case__ )
@classmethod
def a ( cls , snake_case__ , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = [' '.join(m ) for m in tokenizer.bpe_ranks.keys()]
_lowerCAmelCase : Any = tokenizer.get_vocab()
return cls(snake_case__ , snake_case__ , *snake_case__ , **snake_case__ )
@classmethod
def a ( cls , snake_case__ , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = GPTaTokenizer.from_pretrained(snake_case__ , *snake_case__ , **snake_case__ )
return cls.from_tokenizer(snake_case__ , *snake_case__ , **snake_case__ )
@classmethod
def a ( cls , snake_case__ ):
'''simple docstring'''
return cls(**snake_case__ )
def a ( self ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : str = self.tf_tokenizer(snake_case__ )
_lowerCAmelCase : str = tf.ones_like(snake_case__ )
if self.pad_token_id is not None:
# pad the tokens up to max length
_lowerCAmelCase : Optional[int] = max_length if max_length is not None else self.max_length
if max_length is not None:
_lowerCAmelCase , _lowerCAmelCase : str = pad_model_inputs(
snake_case__ , max_seq_length=snake_case__ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
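# Rough usage sketch (upstream this layer is TFGPT2Tokenizer; the class and
# checkpoint names are assumptions):
#   tok = TFGPT2Tokenizer.from_pretrained("gpt2")
#   batch = tok(tf.constant(["hello world"]))   # {"attention_mask": ..., "input_ids": ...}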
| 630 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [10, 20, 30, 40, 50, 60]
_lowerCAmelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12]
_lowerCAmelCase : Dict = 100
self.assertEqual(kp.calc_profit(snake_case__ , snake_case__ , snake_case__ ) , 210 )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(snake_case__ , 'max_weight must greater than zero.' )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(snake_case__ , 'Weight can not be negative.' )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(snake_case__ , 'Profit can not be negative.' )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(snake_case__ , 'max_weight must greater than zero.' )
def a ( self ):
'''simple docstring'''
self.assertRaisesRegex(
snake_case__ , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 630 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _A ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
__SCREAMING_SNAKE_CASE = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
sd_pipe.set_scheduler('sample_euler' )
__SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger'
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = sd_pipe([prompt] , generator=_A , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
__SCREAMING_SNAKE_CASE = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
sd_pipe.set_scheduler('sample_euler' )
__SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger'
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = sd_pipe([prompt] , generator=_A , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
__SCREAMING_SNAKE_CASE = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
__SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger'
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , generator=_A , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=_A , )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 148 |
from __future__ import annotations
import requests
lowerCAmelCase__ : Any =set(
'''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)
def __lowercase ( a__ , a__ = 1 , a__ = "new" , a__ = None ) -> dict:
__SCREAMING_SNAKE_CASE = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(a__ ) - valid_terms ) ):
__SCREAMING_SNAKE_CASE = f"""Invalid search term: {invalid_search_terms}"""
raise ValueError(a__ )
__SCREAMING_SNAKE_CASE = requests.get(
f"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={'User-agent': 'A random string'} , )
if response.status_code == 4_29:
raise requests.HTTPError
__SCREAMING_SNAKE_CASE = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(a__ )}
__SCREAMING_SNAKE_CASE = {}
for id_ in range(a__ ):
__SCREAMING_SNAKE_CASE = {
item: data['data']['children'][id_]['data'][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited. Try after some time
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
| 148 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class a ( UpperCAmelCase ):
@staticmethod
@abstractmethod
def _UpperCAmelCase ( A_ ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def _UpperCAmelCase ( self ):
'''simple docstring'''
raise NotImplementedError()
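# Hypothetical subclass sketch showing how the two abstract hooks of the class
# above are meant to be implemented (all names below are illustrative, not from
# the original):
#   class EnvCommand(BaseCommand):
#       @staticmethod
#       def register_subcommand(parser):
#           parser.add_parser("env")   # `parser` is an argparse subparsers action
#       def run(self):
#           print("environment info goes here")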
| 708 |
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: str , lowerCAmelCase: str ) -> int:
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError("String lengths must match!" )
_UpperCAmelCase : List[Any] = 0
for chara, chara in zip(lowerCAmelCase , lowerCAmelCase ):
if chara != chara:
count += 1
return count
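# Hand-checked doctest-style example for the function above (upstream it is
# named hamming_distance; the example is not part of the original file):
#   >>> hamming_distance("karolin", "kathrin")
#   3
# "karolin" and "kathrin" differ at exactly three positions (r/t, o/h, l/r).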
if __name__ == "__main__":
import doctest
doctest.testmod()
| 467 | 0 |
from ..utils import DummyObject, requires_backends
class _UpperCamelCase (metaclass=a_ ):
snake_case_ = ["""keras_nlp"""]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase )-> Union[str, Any]:
requires_backends(self , ["keras_nlp"] )
| 367 |
'''simple docstring'''
import re
def lowerCAmelCase__ ( lowerCamelCase : str ):
if len(re.findall('[ATCG]' ,lowerCamelCase ) ) != len(lowerCamelCase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' ,'TAGC' ) )
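# Hand-checked doctest-style example for the complement function above; the
# original docstring with examples appears to have been stripped, and
# `dna_complement` is an illustrative name:
#   >>> dna_complement("ATCG")
#   'TAGC'
# Each base maps to its Watson-Crick complement: A<->T and C<->G.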
if __name__ == "__main__":
import doctest
doctest.testmod()
| 128 | 0 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = 'ybelkada/fonts'
def _check_torch_version():
'''simple docstring'''
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    '''simple docstring'''
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
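# Added sanity sketch for the helper above (not part of the original file):
# a 3x32x48 image with 16x16 patches yields shape
# [1, rows, cols, patch_h * patch_w * channels] = [1, 2, 3, 768]:
#   demo = torch_extract_patches(torch.rand(3, 32, 48), 16, 16)
#   assert tuple(demo.shape) == (1, 2, 3, 768)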
def render_text(
    text, text_size=36, text_color="black", background_color="white",
    left_padding=5, right_padding=5, top_padding=5, bottom_padding=5,
    font_bytes=None, font_path=None,
):
    '''simple docstring'''
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    '''simple docstring'''
    requires_backends(render_header, "vision")
    # Convert to PIL image if necessary
    image = to_pil_image(image)
    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))
    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ['flattened_patches']
    def __init__(self, do_convert_rgb=True, do_normalize=True, patch_size=None,
                 max_patches=2048, is_vqa=False, **kwargs) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {'height': 16, 'width': 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image, max_patches, patch_size, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, 'torch')
        _check_torch_version()
        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)
        patch_height, patch_width = patch_size['height'], patch_size['width']
        image_height, image_width = get_image_size(image)
        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear',
            align_corners=False, antialias=True, ).squeeze(0)
        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])
        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()
        result = to_numpy_array(result)
        return result
    def normalize(self, image, data_format=None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)
        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))
        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(self, images, header_text=None, do_convert_rgb=None, do_normalize=None,
                   max_patches=None, patch_size=None, return_tensors=None,
                   data_format=ChannelDimension.FIRST, **kwargs) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa
        if kwargs.get('data_format', None) is not None:
            raise ValueError('data_format is not an accepted input as the outputs are ')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if is_vqa:
            if header_text is None:
                raise ValueError('A header text must be provided for VQA models.')
            font_bytes = kwargs.pop('font_bytes', None)
            font_path = kwargs.pop('font_path', None)
            if isinstance(header_text, str):
                header_text = [header_text] * len(images)
            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]
        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
        encoded_outputs = BatchFeature(
            data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=return_tensors)
        return encoded_outputs
| 300 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
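    # Added worked example: for token_ids_0=[5, 6] and token_ids_1=[7], the method
    # above returns [0, 0, 0, 0, 1, 1] — zeros cover [CLS] 5 6 [SEP], ones cover 7 [SEP].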
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 300 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'])
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch['labels']))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""", eval_metric)
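# Added sketch: `Accelerator.gather_for_metrics` (mentioned in the comments above)
# folds the last-batch truncation into a single call:
#   predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
#   metric.add_batch(predictions=predictions, references=references)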
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.')
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
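    # Hedged usage sketch (the script file name is an assumption):
    #   accelerate launch multi_process_metrics.py --mixed_precision fp16
    # after configuring the launcher once with `accelerate config`.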
| 53 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    """simple docstring"""
    def test_download_only_pytorch(self):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            pipeline = FlaxDiffusionPipeline.from_pretrained(
                'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None, cache_dir=tmpdirname)
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], 'snapshots'))]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith('.bin') for f in files)
@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
    def test_stable_diffusion_flax(self):
        '''simple docstring'''
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None)
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1
        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples
    def test_stable_diffusion_v1_4(self):
        '''simple docstring'''
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=None)
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
        '''simple docstring'''
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None)
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        '''simple docstring'''
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16)
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        '''simple docstring'''
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=False, steps_offset=1)
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, scheduler=scheduler,
            safety_checker=None)
        scheduler_state = scheduler.create_state()
        params['scheduler'] = scheduler_state
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1
    def test_jax_memory_efficient_attention(self):
        '''simple docstring'''
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None)
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None,
            use_memory_efficient_attention=True)
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
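# Added note: with `jit=True` the pipeline call is pmapped, so every array argument
# must carry a leading device axis — hence the `shard`/`replicate`/`jax.random.split`
# bookkeeping performed before each call above.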
| 302 | 0 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
BLIP_TEST_FILE = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {"""BertModelTest""": """BertModelTester"""}
        EXPECTED_BLIP_MAPPING = {
            """BlipModelTest""": """BlipModelTester""",
            """BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
            """BlipTextModelTest""": """BlipTextModelTester""",
            """BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
            """BlipVQAModelTest""": """BlipVQAModelTester""",
            """BlipVisionModelTest""": """BlipVisionModelTester""",
        }
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
            """BertForMaskedLM""": ["""BertModelTest"""],
            """BertForMultipleChoice""": ["""BertModelTest"""],
            """BertForNextSentencePrediction""": ["""BertModelTest"""],
            """BertForPreTraining""": ["""BertModelTest"""],
            """BertForQuestionAnswering""": ["""BertModelTest"""],
            """BertForSequenceClassification""": ["""BertModelTest"""],
            """BertForTokenClassification""": ["""BertModelTest"""],
            """BertLMHeadModel""": ["""BertModelTest"""],
            """BertModel""": ["""BertModelTest"""],
        }
        EXPECTED_BLIP_MAPPING = {
            """BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
            """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
            """BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
            """BlipModel""": ["""BlipModelTest"""],
            """BlipTextModel""": ["""BlipTextModelTest"""],
            """BlipVisionModel""": ["""BlipVisionModelTest"""],
        }
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
            """BertForMaskedLM""": ["""BertModelTester"""],
            """BertForMultipleChoice""": ["""BertModelTester"""],
            """BertForNextSentencePrediction""": ["""BertModelTester"""],
            """BertForPreTraining""": ["""BertModelTester"""],
            """BertForQuestionAnswering""": ["""BertModelTester"""],
            """BertForSequenceClassification""": ["""BertModelTester"""],
            """BertForTokenClassification""": ["""BertModelTester"""],
            """BertLMHeadModel""": ["""BertModelTester"""],
            """BertModel""": ["""BertModelTester"""],
        }
        EXPECTED_BLIP_MAPPING = {
            """BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
            """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
            """BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
            """BlipModel""": ["""BlipModelTester"""],
            """BlipTextModel""": ["""BlipTextModelTester"""],
            """BlipVisionModel""": ["""BlipVisionModelTester"""],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 708 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    '''simple docstring'''
    if num <= 0:
        raise ValueError("""math domain error""")
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    '''simple docstring'''
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
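    # Added sanity note: for positive integers, gamma(n) == (n - 1)!, so
    # gamma(5) evaluates to approximately 24.0.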
| 256 | 0 |
'''simple docstring'''
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """simple docstring"""
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
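# Added worked sketch: for the diagonal matrix [[2, 0], [0, 1]] and start vector
# [1, 1], power_iteration converges to the dominant eigenvalue 2 and (up to sign)
# the eigenvector [1, 0]:
#   value, vec = power_iteration(np.array([[2.0, 0.0], [0.0, 1.0]]), np.array([1.0, 1.0]))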
def test_power_iteration() -> None:
    """simple docstring"""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
| 591 |
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    """simple docstring"""
    if isinstance(num, float):
        raise TypeError('int() can\'t convert non-string with explicit base')
    if num < 0:
        raise ValueError('parameter must be positive int')
    if isinstance(base, str):
        raise TypeError('\'str\' object cannot be interpreted as an integer')
    if isinstance(base, float):
        raise TypeError('\'float\' object cannot be interpreted as an integer')
    if base in (0, 1):
        raise ValueError('base must be >= 2')
    if base > 36:
        raise ValueError('base must be <= 36')
    new_value = ''
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(10_00):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
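    # Added example: decimal_to_any(255, 16) returns 'FF', and
    # int(decimal_to_any(255, 16), 16) == 255, exactly as the loop above asserts.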
| 469 | 0 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        '''simple docstring'''
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        '''simple docstring'''
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        '''simple docstring'''
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        '''simple docstring'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        '''simple docstring'''
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        '''simple docstring'''
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        '''simple docstring'''
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)
        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(25)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        '''simple docstring'''
        pass

    def test_add_noise_device(self):
        '''simple docstring'''
        pass
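# Added sketch: outside the test harness, the scheduler under test can be built
# directly from the same config dict shown above, e.g.
#   scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")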
| 315 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    '''simple docstring'''
    embed = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    '''simple docstring'''
    attention_weights = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    '''simple docstring'''
    token = []
    token.append((f"""cvt.encoder.stages.{idx}.cls_token""", """stage2.cls_token"""))
    return token
def final():
    '''simple docstring'''
    head = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    '''simple docstring'''
    img_labels_file = """imagenet-1k-id2label.json"""
    num_labels = 1000
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="""dataset""")), """r"""))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("""/""", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("""/""", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""")
    # NOTE: storing the target resolution on the processor; the exact attribute is assumed here.
    image_processor.size["""shortest_edge"""] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("""cpu"""))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
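    # Hedged usage sketch (the script file name below is an assumption):
    #   python convert_cvt_checkpoint.py --cvt_model cvt-w24 --image_size 384 \
    #       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
    #       --pytorch_dump_folder_path ./cvt-w24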
| 315 | 1 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, relative_attention=False,
                 position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = DebertaVaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True, )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
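# --- Illustrative usage (added for clarity; not part of the original tests) ---
# A minimal sketch of the inference pattern the integration test above checks,
# assuming the checkpoint is reachable; the token ids are illustrative:
#
#   model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
#   input_ids = tf.constant([[0, 31414, 232, 2]])
#   attention_mask = tf.ones_like(input_ids)
#   last_hidden = model(input_ids, attention_mask=attention_mask)[0]
#   # last_hidden.shape == (1, 4, hidden_size)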
| 632 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """GPT-2-based text decoder that generates captions from a projected prefix embedding."""

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
    @register_to_config
    def __init__(self, prefix_length: int, prefix_inner_dim: int, prefix_hidden_dim: Optional[int] = None,
                 vocab_size: int = 50257, n_positions: int = 1024, n_embd: int = 768, n_layer: int = 12,
                 n_head: int = 12, n_inner: Optional[int] = None, activation_function: str = "gelu_new",
                 resid_pdrop: float = 0.1, embd_pdrop: float = 0.1, attn_pdrop: float = 0.1,
                 layer_norm_epsilon: float = 1e-5, initializer_range: float = 0.02,
                 scale_attn_weights: bool = True, use_cache: bool = True,
                 scale_attn_by_inverse_layer_idx: bool = False, reorder_and_upcast_attn: bool = False):
        super().__init__()
        self.prefix_length = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal.")
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPTaConfig(
            vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head, n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, )
        self.transformer = GPTaLMHeadModel(gpt_config)
    def forward(self, input_ids: torch.Tensor, prefix_embeds: torch.Tensor,
                attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prompt):
        return self.encode_prefix(prompt)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id)
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(self, input_embeds=None, device=None, input_ids=None, beam_size: int = 5,
                      entry_length: int = 67, temperature: float = 1.0, eos_token_id: Optional[int] = None):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)
        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
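# --- Illustrative sketch (added for clarity; not part of the original module) ---
# generate_beam above ranks beams by length-normalized cumulative log-probability
# (scores_sum / seq_lengths). A self-contained toy version of that scoring step,
# with hypothetical numbers:

def _demo_length_normalized_ranking():
    beam_scores = torch.tensor([-1.2, -0.9, -2.0])  # cumulative log-probs per beam
    beam_lengths = torch.tensor([3.0, 2.0, 4.0])    # generated tokens per beam
    normalized = beam_scores / beam_lengths          # average log-prob per token
    return normalized.argsort(descending=True)       # beam indices, best first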
| 632 | 1 |
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 662 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 662 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class PixaStructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, vocab_size=50244, hidden_size=768, d_kv=64, d_ff=2048, num_layers=12, num_heads=12,
                 relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1,
                 layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new",
                 decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1,
                 tie_word_embeddings=False, is_decoder=True, **kwargs) -> None:
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs, )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class PixaStructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"
    def __init__(self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64, num_hidden_layers=12,
                 num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0,
                 attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4096,
                 relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class PixaStructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True
    def __init__(self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02,
                 is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs) -> None:
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs(cls, text_config: PixaStructTextConfig, vision_config: PixaStructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
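# --- Illustrative usage (added for clarity; not part of the original file) ---
# A minimal sketch of composing the three configs defined above; all values
# are placeholders:
#
#   text_config = PixaStructTextConfig(num_layers=2, hidden_size=64, num_heads=2)
#   vision_config = PixaStructVisionConfig(num_hidden_layers=2, hidden_size=64)
#   config = PixaStructConfig(
#       text_config=text_config.to_dict(), vision_config=vision_config.to_dict()
#   )
#   restored = PixaStructConfig.from_dict(config.to_dict())  # round-trips via to_dict()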
| 60 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"
    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
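# --- Illustrative note (added for clarity; not part of the original file) ---
# For the default task, the `inputs` property above resolves to:
#
#   OrderedDict([
#       ("input_ids", {0: "batch", 1: "sequence"}),
#       ("attention_mask", {0: "batch", 1: "sequence"}),
#   ])
#
# while task == "multiple-choice" inserts a "choice" axis at position 1, so the
# ONNX export keeps batch size, choice count, and sequence length dynamic.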
| 608 | 0 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechaTextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)
    def test_full_tokenizer(self):
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [289, 50, 14, 174, 386], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."], )
@slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowerCAmelCase = {"""input_ids""": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase, model_name="""facebook/s2t-small-mustc-en-de-st""", revision="""a14f04cf0776c02f62a8cb800cf7909e15ea23ad""", )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"
    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"
@classmethod
    def setUpClass(cls):
        cls.tokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
    def test_lang_code_to_id(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)
    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        spanish_text = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, spanish_text)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)
    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])
        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
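# --- Illustrative sketch (added for clarity; not part of the original tests) ---
# The multilingual tests above rely on the tokenizer prefixing every sequence
# with a target-language id. A toy version of that lookup, using the ids asserted
# in test_lang_code_to_id plus the FR_CODE/ES_CODE constants from the top of the file:

def _demo_prefix_tokens(tgt_lang):
    demo_lang_code_to_id = {"pt": 4, "fr": 5, "ru": 6, "it": 9, "es": 10, "de": 11}
    # the language id becomes the first token of the encoded sequence
    return [demo_lang_code_to_id[tgt_lang]]

assert _demo_prefix_tokens("fr") == [FR_CODE]
assert _demo_prefix_tokens("es") == [ES_CODE]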
| 708 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    """Map a Megatron-DeepSpeed weight name onto the transformers naming scheme."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
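# --- Illustrative usage (added for clarity; not part of the original script) ---
# get_dtype_size parses the trailing bit width out of the dtype's string form
# and converts it to bytes, with torch.bool special-cased:
#
#   get_dtype_size(torch.float16)  # -> 2
#   get_dtype_size(torch.float32)  # -> 4
#   get_dtype_size(torch.int64)    # -> 8
#   get_dtype_size(torch.bool)     # -> 0.125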
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)
    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))
        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0
        missing_keys = None
        config = BloomConfig()
        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")
                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )
            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5))
        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))
        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")
                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"
        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
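# --- Illustrative invocation (added for clarity; not part of the original script) ---
# A hypothetical command line for a sharded conversion; all paths are placeholders:
#
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /path/to/megatron_checkpoints \
#       --pytorch_dump_folder_path ./bloom-converted \
#       --shard_model \
#       --pretraining_tp 4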
| 334 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4,
                 hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0,
                 embed_dim=16, word_embed_proj_dim=16):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)
                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32,
            max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]])
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))
        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPTaTokenizer.from_pretrained(self.path_model)
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ])
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self : str ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = '''facebook/opt-125m'''
__lowercase = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__lowercase = []
__lowercase = GPTaTokenizer.from_pretrained(lowercase__ )
__lowercase = TFOPTForCausalLM.from_pretrained(lowercase__ )
for prompt in self.prompts:
__lowercase = tokenizer(lowercase__ ,return_tensors='''tf''' ).input_ids
__lowercase = model.generate(lowercase__ ,max_length=1_0 )
__lowercase = tokenizer.batch_decode(lowercase__ ,skip_special_tokens=lowercase__ )
predicted_outputs += generated_string
self.assertListEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = '''facebook/opt-350m'''
__lowercase = GPTaTokenizer.from_pretrained(lowercase__ )
__lowercase = TFOPTForCausalLM.from_pretrained(lowercase__ )
__lowercase = '''left'''
# use different length sentences to test batching
__lowercase = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__lowercase = tokenizer(lowercase__ ,return_tensors='''tf''' ,padding=lowercase__ )
__lowercase = inputs['''input_ids''']
__lowercase = model.generate(input_ids=lowercase__ ,attention_mask=inputs['''attention_mask'''] )
__lowercase = tokenizer(sentences[0] ,return_tensors='''tf''' ).input_ids
__lowercase = model.generate(input_ids=lowercase__ )
__lowercase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] ,tf.intaa ) )
__lowercase = tokenizer(sentences[1] ,return_tensors='''tf''' ).input_ids
__lowercase = model.generate(input_ids=lowercase__ ,max_length=model.config.max_length - num_paddings )
__lowercase = tokenizer.batch_decode(lowercase__ ,skip_special_tokens=lowercase__ )
__lowercase = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=lowercase__ )
__lowercase = tokenizer.decode(output_padded[0] ,skip_special_tokens=lowercase__ )
__lowercase = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(lowercase__ ,lowercase__ )
self.assertListEqual(lowercase__ ,[non_padded_sentence, padded_sentence] )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = '''facebook/opt-350m'''
__lowercase = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__lowercase = []
__lowercase = GPTaTokenizer.from_pretrained(lowercase__ )
__lowercase = TFOPTForCausalLM.from_pretrained(lowercase__ )
for prompt in self.prompts:
__lowercase = tokenizer(lowercase__ ,return_tensors='''tf''' ).input_ids
__lowercase = model.generate(lowercase__ ,max_length=1_0 )
__lowercase = tokenizer.batch_decode(lowercase__ ,skip_special_tokens=lowercase__ )
predicted_outputs += generated_string
self.assertListEqual(lowercase__ ,lowercase__ )
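# A minimal, self-contained sketch of the batched-generation pattern the tests
# above exercise (decoder-only models need left padding so generation continues
# from real tokens). Classes and checkpoint are the ones the tests already use;
# the prompts and everything else are illustrative.
from transformers import GPT2Tokenizer, TFOPTForCausalLM

tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m")
tokenizer.padding_side = "left"  # pad on the left for batched generate()
model = TFOPTForCausalLM.from_pretrained("facebook/opt-350m")

sentences = ["Hello, my dog is a little", "Today, I"]
inputs = tokenizer(sentences, return_tensors="tf", padding=True)
outputs = model.generate(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))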
| 41 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = ViTImageProcessor if is_vision_available() else None
@property
def __snake_case ( self : List[str]) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')

        image_processor_map = {
            'do_normalize': False,
            'do_resize': True,
            'image_processor_type': 'ViTImageProcessor',
            'resample': 3,
            'size': {'height': 32, 'width': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a random uint8 image array and convert it to a channels-last PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def __snake_case ( self : Optional[Any]) -> List[Any]:
A_ = self.get_tokenizer()
A_ = self.get_image_processor()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
processor.save_pretrained(self.tmpdirname)
A_ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_lowercase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
def __snake_case ( self : Union[str, Any]) -> Optional[Any]:
A_ = self.get_tokenizer()
A_ = self.get_image_processor()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
processor.save_pretrained(self.tmpdirname)
A_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
A_ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0)
A_ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_lowercase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
def __snake_case ( self : List[Any]) -> str:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = self.prepare_image_inputs()
A_ = image_processor(_lowercase , return_tensors='np')
A_ = processor(images=_lowercase , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def __snake_case ( self : Any) -> str:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = 'test'
A_ = processor(text=_lowercase)
A_ = tokenizer(_lowercase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __snake_case ( self : str) -> Dict:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = 'test'
A_ = self.prepare_image_inputs()
A_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'labels'])
# test if it raises when no input is passed
with pytest.raises(_lowercase):
processor()
def __snake_case ( self : Union[str, Any]) -> Optional[int]:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
A_ = processor.char_decode(_lowercase)
A_ = tokenizer.batch_decode(_lowercase)
A_ = [seq.replace(' ' , '') for seq in decoded_tok]
self.assertListEqual(_lowercase , _lowercase)
def __snake_case ( self : List[str]) -> str:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = None
A_ = self.prepare_image_inputs()
A_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def __snake_case ( self : List[str]) -> Any:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = torch.randn(1 , 27 , 38)
A_ = torch.randn(1 , 27 , 50_257)
A_ = torch.randn(1 , 27 , 30_522)
A_ = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'])
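# A minimal sketch of the save/load round trip the tests above rely on, using
# the public MGP-STR classes from `transformers`. The checkpoint name is an
# assumption (the official "alibaba-damo/mgp-str-base" release), not something
# the tests themselves download.
import tempfile

from transformers import MgpstrProcessor, MgpstrTokenizer, ViTImageProcessor

with tempfile.TemporaryDirectory() as tmpdir:
    processor = MgpstrProcessor(
        tokenizer=MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base"),
        image_processor=ViTImageProcessor.from_pretrained("alibaba-damo/mgp-str-base"),
    )
    processor.save_pretrained(tmpdir)
    reloaded = MgpstrProcessor.from_pretrained(tmpdir)  # same tokenizer + image processor back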
| 366 | 0 |
# Function to print upper half of diamond (pyramid)
def floyd(n):
    '''simple docstring'''
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


# Function to print lower half of diamond (pyramid)
def reverse_floyd(n):
    '''simple docstring'''
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


# Function to print the complete diamond pattern of "*"
def pretty_print(n):
    '''simple docstring'''
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r'''| /\ | |- | |- |--| |\ /| |-''')
    print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
    K = 1
    while K:
        user_number = int(input('''enter the number and , and see the magic : '''))
        print()
        pretty_print(user_number)
        K = int(input('''press 0 to exit... and 1 to continue...'''))
    print('''Good Bye...''')
| 715 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: '''Sunday''',
    1: '''Monday''',
    2: '''Tuesday''',
    3: '''Wednesday''',
    4: '''Thursday''',
    5: '''Friday''',
    6: '''Saturday''',
}


def get_week_day(year: int, month: int, day: int) -> str:
    '''simple docstring'''
    # minimal input checks:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        # common years: not divisible by 4, or a century year not divisible by 400
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
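# Worked example for get_week_day above (2022-01-01 fell on a Saturday):
# get_week_day(2022, 1, 1) -> 'Saturday'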
| 32 | 0 |
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers (DDP/DataParallel/DeepSpeed/compile)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model, "_converted_to_transformer_engine", False):
        convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save `obj` to `f`, only on the main process (or via `xm.save` on TPU)."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set (upper-cased) environment variables, removing them on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Return a readable name for an object, class or function."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination` and return `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check whether `port` is already bound on localhost."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
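# A tiny usage sketch for the helpers above (all values are illustrative):
#
#   with patch_environment(master_port="29501"):
#       assert os.environ["MASTER_PORT"] == "29501"
#   print(merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}}))  # -> {'a': {'c': 2, 'b': 1}}
#   print(is_port_in_use(29500))                          # True if a process is listening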
| 427 |
'''simple docstring'''
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    '''simple docstring'''
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    '''simple docstring'''
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    '''simple docstring'''
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    '''simple docstring'''
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f'{sum(compute_truncated_primes(11)) = }')
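# Sanity check (Project Euler 37): the eleven two-sided truncatable primes sum to 748317.
# solution() -> 748317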
| 427 | 1 |
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """simple docstring"""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """simple docstring"""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
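# Worked example for the two helpers above (plain equivalent-resistance arithmetic):
# resistor_parallel([4, 12]) -> 3.0, since 1 / (1/4 + 1/12) = 3
# resistor_series([4, 12])   -> 16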
| 705 |
from math import sqrt
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """simple docstring"""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 547 | 0 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : str = dataclasses.field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models."""} )
_snake_case : Optional[str] = dataclasses.field(
default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co."""} , )
@dataclasses.dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : str = dataclasses.field(metadata={"""help""": """A csv or a json file containing the training data."""} )
_snake_case : str = dataclasses.field(metadata={"""help""": """A csv or a json file containing the data to predict on."""} )
_snake_case : Optional[str] = dataclasses.field(
default=lowercase , metadata={"""help""": """A csv or a json file containing the validation data."""} )
_snake_case : Optional[str] = dataclasses.field(
default=lowercase , metadata={"""help""": """The name of the task to train on."""} , )
_snake_case : Optional[List[str]] = dataclasses.field(
default=lowercase , metadata={"""help""": """The list of labels for the task."""} )
@dataclasses.dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : str = dataclasses.field(
metadata={"""help""": """The output directory where the model predictions and checkpoints will be written."""} )
_snake_case : Optional[str] = dataclasses.field(
default="""accuracy""" , metadata={"""help""": """The evaluation metric used for the task."""} )
_snake_case : Optional[str] = dataclasses.field(
default="""no""" , metadata={
"""help""": """The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"""
} , )
_snake_case : Optional[int] = dataclasses.field(
default=10 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , )
_snake_case : Optional[float] = dataclasses.field(
default=0.0 , metadata={
"""help""": """How much the specified evaluation metric must improve to satisfy early stopping conditions."""
} , )
_snake_case : Optional[bool] = dataclasses.field(
default=lowercase , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the confidence score."""} , )
_snake_case : Optional[bool] = dataclasses.field(
default=lowercase , metadata={"""help""": """Whether to filter the pseudo-labeled data based on the validation performance."""} , )
_snake_case : Optional[bool] = dataclasses.field(
default=lowercase , metadata={"""help""": """Whether to fine-tune on labeled data after pseudo training."""} , )
_snake_case : Optional[float] = dataclasses.field(
default=0.0 , metadata={"""help""": """Confidence threshold for pseudo-labeled data filtering."""} , )
_snake_case : Optional[int] = dataclasses.field(
default=100 , metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""} , )
_snake_case : Optional[int] = dataclasses.field(
default=lowercase , metadata={"""help""": """Random seed for initialization."""} , )
def A ( lowercase__ : List[Any] , lowercase__ : Tuple , lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : Dict , lowercase__ : Optional[Any] ) -> List[str]:
UpperCamelCase__ :Union[str, Any] = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
UpperCamelCase__ :List[str] = dataset.filter(lambda lowercase__ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
UpperCamelCase__ :Any = int(eval_result * len(lowercase__ ) )
print(lowercase__ )
UpperCamelCase__ :Optional[Any] = dataset.sort("""probability""" , reverse=lowercase__ )
UpperCamelCase__ :Optional[Any] = dataset.select(range(lowercase__ ) )
UpperCamelCase__ :int = dataset.remove_columns(["""label""", """probability"""] )
UpperCamelCase__ :str = dataset.rename_column("""prediction""" , """label""" )
UpperCamelCase__ :Optional[Any] = dataset.map(lambda lowercase__ : {"label": idalabel[example["label"]]} )
UpperCamelCase__ :Dict = dataset.shuffle(seed=args.seed )
UpperCamelCase__ :int = os.path.join(lowercase__ , f"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(lowercase__ , index=lowercase__ )
else:
dataset.to_json(lowercase__ )
def A ( lowercase__ : Tuple , lowercase__ : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : Dict , **lowercase__ : Dict ) -> Tuple:
UpperCamelCase__ :int = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
UpperCamelCase__ :List[str] = STModelArguments(model_name_or_path=lowercase__ )
UpperCamelCase__ :str = STDataArguments(train_file=lowercase__ , infer_file=lowercase__ )
UpperCamelCase__ :List[Any] = STTrainingArguments(output_dir=lowercase__ )
UpperCamelCase__ :List[Any] = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(lowercase__ ).items():
setattr(lowercase__ , lowercase__ , lowercase__ )
for key, value in kwargs.items():
if hasattr(lowercase__ , lowercase__ ):
setattr(lowercase__ , lowercase__ , lowercase__ )
# Sanity checks
UpperCamelCase__ :Tuple = {}
UpperCamelCase__ :Tuple = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
UpperCamelCase__ :Optional[Any] = args.train_file
UpperCamelCase__ :Optional[int] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
UpperCamelCase__ :Optional[Any] = args.eval_file
for key in data_files:
UpperCamelCase__ :Dict = data_files[key].split(""".""" )[-1]
assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
UpperCamelCase__ :Any = extension
else:
assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("""Creating the initial data directory for self-training...""" )
UpperCamelCase__ :Optional[int] = f"""{args.output_dir}/self-train_iter-{{}}""".format
UpperCamelCase__ :str = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=lowercase__ )
os.makedirs(lowercase__ , exist_ok=lowercase__ )
accelerator.wait_for_everyone()
UpperCamelCase__ :Union[str, Any] = None
UpperCamelCase__ :str = None
UpperCamelCase__ :List[str] = 0
UpperCamelCase__ :Optional[int] = False
# Show the progress bar
UpperCamelCase__ :Optional[int] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
UpperCamelCase__ :int = data_dir_format(lowercase__ )
assert os.path.exists(lowercase__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
UpperCamelCase__ :Tuple = os.path.join(lowercase__ , """stage-1""" )
UpperCamelCase__ :Tuple = {
"""accelerator""": accelerator,
"""model_name_or_path""": args.model_name_or_path,
"""cache_dir""": args.cache_dir,
"""do_train""": True,
"""train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
"""do_eval""": True if args.eval_file is not None else False,
"""eval_file""": data_files["""eval"""],
"""do_predict""": True,
"""infer_file""": data_files["""infer"""],
"""task_name""": args.task_name,
"""label_list""": args.label_list,
"""output_dir""": current_output_dir,
"""eval_metric""": args.eval_metric,
"""evaluation_strategy""": args.evaluation_strategy,
"""early_stopping_patience""": args.early_stopping_patience,
"""early_stopping_threshold""": args.early_stopping_threshold,
"""seed""": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(lowercase__ , lowercase__ ):
arguments_dict.update({key: value} )
UpperCamelCase__ :Union[str, Any] = os.path.join(lowercase__ , """best-checkpoint""" , lowercase__ )
if os.path.exists(lowercase__ ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , lowercase__ , lowercase__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , lowercase__ )
finetune(**lowercase__ )
accelerator.wait_for_everyone()
assert os.path.exists(lowercase__ )
logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , lowercase__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
UpperCamelCase__ :List[Any] = os.path.join(lowercase__ , """best-checkpoint""" )
UpperCamelCase__ :List[Any] = os.path.join(lowercase__ , """stage-2""" )
# Update arguments_dict
UpperCamelCase__ :List[Any] = model_path
UpperCamelCase__ :Any = data_files["""train"""]
UpperCamelCase__ :Optional[int] = current_output_dir
UpperCamelCase__ :str = os.path.join(lowercase__ , """best-checkpoint""" , lowercase__ )
if os.path.exists(lowercase__ ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" , lowercase__ , lowercase__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , lowercase__ )
finetune(**lowercase__ )
accelerator.wait_for_everyone()
assert os.path.exists(lowercase__ )
logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , lowercase__ )
UpperCamelCase__ :Tuple = iteration
UpperCamelCase__ :List[Any] = data_dir_format(iteration + 1 )
UpperCamelCase__ :Optional[Any] = AutoConfig.from_pretrained(os.path.join(lowercase__ , """best-checkpoint""" ) )
UpperCamelCase__ :List[str] = config.idalabel
UpperCamelCase__ :Union[str, Any] = os.path.join(lowercase__ , """eval_results_best-checkpoint.json""" )
UpperCamelCase__ :Dict = os.path.join(lowercase__ , """test_results_best-checkpoint.json""" )
assert os.path.exists(lowercase__ )
with open(lowercase__ , """r""" ) as f:
UpperCamelCase__ :List[Any] = float(json.load(lowercase__ )[args.eval_metric] )
UpperCamelCase__ :Any = os.path.join(lowercase__ , """infer_output_best-checkpoint.csv""" )
assert os.path.exists(lowercase__ )
# Loading the dataset from local csv or json files.
UpperCamelCase__ :Any = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""]
UpperCamelCase__ :Optional[int] = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""]
if accelerator.is_main_process:
os.makedirs(lowercase__ , exist_ok=lowercase__ )
shutil.copy(lowercase__ , os.path.join(lowercase__ , f"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(lowercase__ ):
shutil.copy(lowercase__ , os.path.join(lowercase__ , f"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
accelerator.wait_for_everyone()
UpperCamelCase__ :List[str] = os.path.join(lowercase__ , f"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
UpperCamelCase__ :List[Any] = eval_result
if best_iteration is None:
UpperCamelCase__ :Union[str, Any] = new_iteration
UpperCamelCase__ :List[str] = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
UpperCamelCase__ :Optional[Any] = new_iteration
UpperCamelCase__ :Optional[int] = new_eval_result
UpperCamelCase__ :Optional[int] = 0
else:
if new_eval_result == best_eval_result:
UpperCamelCase__ :str = new_iteration
UpperCamelCase__ :Dict = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
UpperCamelCase__ :Optional[int] = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("""Best iteration: %d""" , lowercase__ )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , lowercase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowercase__ , f"""eval_results_iter-{iteration}.json""" ) , os.path.join(lowercase__ , """eval_results_best-iteration.json""" ) , )
else:
# Assume that the last iteration is the best
logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , lowercase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowercase__ , f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(lowercase__ , """eval_results_best-iteration.json""" ) , ) | 45 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
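# A stripped-down sketch of what the lazy-module pattern above buys. This is an
# illustration of the mechanism only, not the actual transformers `_LazyModule`
# implementation: attribute access on the module proxy triggers the real import.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported class name to the submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        # import the submodule only when one of its names is first accessed
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)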
| 164 | 0 |
"""simple docstring"""
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 712 |
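# Worked example for binary_and above: 25 = 0b11001 and 32 = 0b100000 share no
# set bits, so every aligned digit pair ANDs to zero.
# binary_and(25, 32) -> '0b000000'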
"""simple docstring"""
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 370 | 0 |
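# Worked examples for the distance functions above:
# manhattan_distance([1, 1], [2, 2])           -> 2.0
# manhattan_distance_one_liner([1, 4], [5, 5]) -> 5.0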
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/config.json""",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}


class XGLMConfig(PretrainedConfig):
    model_type = '''xglm'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_attention_heads''': '''attention_heads''',
        '''hidden_size''': '''d_model''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__(
        self,
        vocab_size=25_6008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
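# Quick usage check for the attribute map above:
# XGLMConfig().num_attention_heads -> 16   (resolved to `attention_heads`)
# XGLMConfig().hidden_size         -> 1024 (resolved to `d_model`)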
| 178 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    """simple docstring"""
    config = AlbertConfig.from_json_file(albert_config_file)
    print(F'Building PyTorch model from configuration: {config}')
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__snake_case = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
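# Example invocation of the converter above (the flags come from the argparse
# setup; the script name follows the transformers repo and all paths are
# placeholders):
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_model.ckpt \
#       --albert_config_file ./albert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin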
| 178 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = CanineTokenizer
__UpperCAmelCase = False
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        '''simple docstring'''
        return CanineTokenizer.from_pretrained('''google/canine-s''')
def __magic_name__ ( self : Union[str, Any] , **snake_case_ : Optional[int] ):
'''simple docstring'''
snake_case__ : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ )
snake_case__ : Any = 1_0_2_4
return tokenizer
@require_torch
def __magic_name__ ( self : str ):
'''simple docstring'''
snake_case__ : Any = self.canine_tokenizer
snake_case__ : Dict = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.''']
# fmt: off
snake_case__ : List[str] = [5_7_3_4_4, 7_6, 1_0_5, 1_0_2, 1_0_1, 3_2, 1_0_5, 1_1_5, 3_2, 1_0_8, 1_0_5, 1_0_7, 1_0_1, 3_2, 9_7, 3_2, 9_8, 1_1_1, 1_2_0, 3_2, 1_1_1, 1_0_2, 3_2, 9_9, 1_0_4, 1_1_1, 9_9, 1_1_1, 1_0_8, 9_7, 1_1_6, 1_0_1, 1_1_5, 4_6, 5_7_3_4_5, 0, 0, 0, 0]
# fmt: on
snake_case__ : Dict = tokenizer(snake_case_ , padding=snake_case_ , return_tensors='''pt''' )
self.assertIsInstance(snake_case_ , snake_case_ )
snake_case__ : Any = list(batch.input_ids.numpy()[0] )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertEqual((2, 3_9) , batch.input_ids.shape )
self.assertEqual((2, 3_9) , batch.attention_mask.shape )
@require_torch
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
snake_case__ : int = self.canine_tokenizer
snake_case__ : int = ['''Once there was a man.''', '''He wrote a test in HuggingFace Tranformers.''']
snake_case__ : Dict = tokenizer(snake_case_ , padding=snake_case_ , return_tensors='''pt''' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('''input_ids''' , snake_case_ )
self.assertIn('''attention_mask''' , snake_case_ )
self.assertIn('''token_type_ids''' , snake_case_ )
@require_torch
def __magic_name__ ( self : int ):
'''simple docstring'''
snake_case__ : int = self.canine_tokenizer
snake_case__ : str = [
'''What\'s the weater?''',
'''It\'s about 25 degrees.''',
]
snake_case__ : List[Any] = tokenizer(
text_target=snake_case_ , max_length=3_2 , padding='''max_length''' , truncation=snake_case_ , return_tensors='''pt''' )
self.assertEqual(3_2 , targets['''input_ids'''].shape[1] )
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
snake_case__ : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
snake_case__ : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case__ : str = tempfile.mkdtemp()
snake_case__ : Any = ''' He is very happy, UNwant\u00E9d,running'''
snake_case__ : str = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
tokenizer.save_pretrained(snake_case_ )
snake_case__ : Dict = tokenizer.__class__.from_pretrained(snake_case_ )
snake_case__ : List[str] = after_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
shutil.rmtree(snake_case_ )
snake_case__ : int = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
snake_case__ : List[str] = tempfile.mkdtemp()
snake_case__ : Any = ''' He is very happy, UNwant\u00E9d,running'''
snake_case__ : Dict = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
snake_case__ : Tuple = chr(0xE007 )
additional_special_tokens.append(snake_case_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
snake_case__ : Tuple = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
tokenizer.save_pretrained(snake_case_ )
snake_case__ : List[Any] = tokenizer.__class__.from_pretrained(snake_case_ )
snake_case__ : str = after_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertIn(snake_case_ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
snake_case__ : List[Any] = tokenizer.__class__.from_pretrained(snake_case_ , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(snake_case_ )
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
snake_case__ : Dict = self.get_tokenizers(do_lower_case=snake_case_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case__ , snake_case__ : Dict = self.get_clean_sequence(snake_case_ )
# a special token for Canine can be defined as follows:
snake_case__ : Dict = 0xE005
snake_case__ : List[str] = chr(snake_case_ )
tokenizer.add_special_tokens({'''cls_token''': special_token} )
snake_case__ : Optional[Any] = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertEqual(len(snake_case_ ) , 1 )
snake_case__ : Any = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=snake_case_ )
snake_case__ : Optional[int] = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
snake_case__ : Dict = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
snake_case__ : int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , input_encoded + special_token_id )
snake_case__ : Any = tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
self.assertTrue(special_token not in decoded )
def __magic_name__ ( self : int ):
'''simple docstring'''
snake_case__ : Any = self.get_tokenizers(do_lower_case=snake_case_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case__ : List[str] = chr(0xE005 )
snake_case__ : Optional[int] = chr(0xE006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=snake_case_ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'''additional_special_tokens''': [SPECIAL_TOKEN_2]} )
snake_case__ : List[Any] = tokenizer.tokenize(snake_case_ )
snake_case__ : Any = tokenizer.tokenize(snake_case_ )
self.assertEqual(len(snake_case_ ) , 1 )
self.assertEqual(len(snake_case_ ) , 1 )
self.assertEqual(token_a[0] , snake_case_ )
self.assertEqual(token_a[0] , snake_case_ )
@require_tokenizers
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
snake_case__ : Optional[int] = self.get_tokenizers(do_lower_case=snake_case_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# a special token for Canine can be defined as follows:
snake_case__ : Union[str, Any] = 0xE006
snake_case__ : Any = chr(snake_case_ )
snake_case__ : Any = AddedToken(snake_case_ , lstrip=snake_case_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(snake_case_ )
tokenizer.from_pretrained(snake_case_ )
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
snake_case__ : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(snake_case_ )
with open(os.path.join(snake_case_ , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
snake_case__ : Union[str, Any] = json.load(snake_case_ )
with open(os.path.join(snake_case_ , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
snake_case__ : List[Any] = json.load(snake_case_ )
# a special token for Canine can be defined as follows:
snake_case__ : Optional[Any] = 0xE006
snake_case__ : List[Any] = chr(snake_case_ )
snake_case__ : List[str] = [new_token_a]
snake_case__ : int = [new_token_a]
with open(os.path.join(snake_case_ , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(snake_case_ , snake_case_ )
with open(os.path.join(snake_case_ , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(snake_case_ , snake_case_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
snake_case__ : Dict = tokenizer_class.from_pretrained(snake_case_ , extra_ids=0 )
self.assertIn(snake_case_ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
snake_case__ : str = 0xE007
snake_case__ : Dict = chr(snake_case_ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
snake_case__ : Tuple = [AddedToken(snake_case_ , lstrip=snake_case_ )]
snake_case__ : Optional[int] = tokenizer_class.from_pretrained(
snake_case_ , additional_special_tokens=snake_case_ , extra_ids=0 )
self.assertIn(snake_case_ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
snake_case__ : Any = self.get_tokenizers(do_lower_case=snake_case_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case__ : str = '''hello world'''
if self.space_between_special_tokens:
snake_case__ : int = '''[CLS] hello world [SEP]'''
else:
snake_case__ : List[str] = input
snake_case__ : Optional[Any] = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
snake_case__ : Optional[int] = tokenizer.decode(snake_case_ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(snake_case_ , [output, output.lower()] )
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
snake_case__ : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case__ : Union[str, Any] = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
snake_case__ : Tuple = '''a'''
snake_case__ : List[str] = ord(snake_case_ )
for attr in attributes_list:
setattr(snake_case_ , attr + '''_id''' , snake_case_ )
self.assertEqual(getattr(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(getattr(snake_case_ , attr + '''_id''' ) , snake_case_ )
setattr(snake_case_ , attr + '''_id''' , snake_case_ )
self.assertEqual(getattr(snake_case_ , snake_case_ ) , snake_case_ )
self.assertEqual(getattr(snake_case_ , attr + '''_id''' ) , snake_case_ )
setattr(snake_case_ , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(snake_case_ , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(snake_case_ , '''additional_special_tokens_ids''' ) , [] )
snake_case__ : Any = 0xE006
snake_case__ : List[Any] = chr(snake_case_ )
setattr(snake_case_ , '''additional_special_tokens_ids''' , [additional_special_token_id] )
self.assertListEqual(getattr(snake_case_ , '''additional_special_tokens''' ) , [additional_special_token] )
self.assertListEqual(getattr(snake_case_ , '''additional_special_tokens_ids''' ) , [additional_special_token_id] )
def __magic_name__ ( self : str ):
'''simple docstring'''
pass
def __magic_name__ ( self : str ):
'''simple docstring'''
pass
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
pass
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
pass
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
pass
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
pass
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
def __magic_name__ ( self : Dict ):
'''simple docstring'''
pass
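# A minimal sketch of the special-token workflow the tests above cover, using
# the same public class and checkpoint. The specific code point is arbitrary:
# CANINE keeps a private-use-area range free for exactly this purpose.
from transformers import CanineTokenizer

tok = CanineTokenizer.from_pretrained("google/canine-s")
new_token = chr(0xE007)
tok.add_special_tokens({"additional_special_tokens": [new_token]})
print(new_token in tok.additional_special_tokens)  # True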
| 502 |
'''simple docstring'''
def lucas_lehmer_test(p: int) -> bool:
    """simple docstring"""
    if p < 2:
        raise ValueError('''p should not be less than 2!''')
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))   # True:  2**7 - 1 = 127 is a Mersenne prime
    print(lucas_lehmer_test(11))  # False: 2**11 - 1 = 2047 = 23 * 89
| 502 | 1 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class lowerCAmelCase_ ( lowerCamelCase_ ):
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type ,pa.intaa() )
def snake_case ( self ):
with self.assertRaises(snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = pa.array(TypedSequence([1, 2, 3] ) ,type=pa.intaa() )
def snake_case ( self ):
with self.assertRaises(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = pa.array(TypedSequence([1, 2, 3] ,try_type=Value('bool' ) ,type=Value('int64' ) ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = pa.array(TypedSequence([1, 2, 3] ,type=Value('int32' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def snake_case ( self ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE_ : Any = pa.array(TypedSequence(['foo', 'bar'] ,type=Value('int64' ) ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = pa.array(TypedSequence([1, 2, 3] ,try_type=Value('int32' ) ) )
self.assertEqual(arr.type ,pa.intaa() )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pa.array(TypedSequence(['foo', 'bar'] ,try_type=Value('int64' ) ) )
self.assertEqual(arr.type ,pa.string() )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = pa.array(TypedSequence([[[1, 2, 3]]] ,type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
def snake_case ( self ):
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE_ : Any = pa.array(TypedSequence(['foo', 'bar'] ,type=ArrayaD((1, 3) ,'int64' ) ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pa.array(TypedSequence([[[1, 2, 3]]] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,'int64' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = pa.array(TypedSequence(['foo', 'bar'] ,try_type=ArrayaD((1, 3) ,'int64' ) ) )
self.assertEqual(arr.type ,pa.string() )
@require_pil
def snake_case ( self ):
import PIL.Image
SCREAMING_SNAKE_CASE_ : Union[str, Any] = PIL.Image.fromarray(np.arange(10 ,dtype=np.uinta ).reshape(2 ,5 ) )
with patch(
'datasets.arrow_writer.cast_to_python_objects' ,side_effect=snake_case__ ) as mock_cast_to_python_objects:
SCREAMING_SNAKE_CASE_ : Optional[int] = pa.array(TypedSequence([{'path': None, 'bytes': b'image_bytes'}, pil_image] ,type=Image() ) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('optimize_list_casting' ,snake_case__ )
self.assertFalse(kwargs['optimize_list_casting'] )
def __UpperCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : int ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = pa.BufferReader(lowerCamelCase_ ) if isinstance(lowerCamelCase_ , pa.Buffer ) else pa.memory_map(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = pa.ipc.open_stream(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : pa.Table = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
'fields' , [None, {'col_1': pa.string(), 'col_2': pa.intaa()}, {'col_1': pa.string(), 'col_2': pa.intaa()}] )
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE_ : Any = pa.schema(lowerCamelCase_ ) if fields else None
with ArrowWriter(stream=lowerCamelCase_ , schema=lowerCamelCase_ , writer_batch_size=lowerCamelCase_ ) as writer:
writer.write({'col_1': 'foo', 'col_2': 1} )
writer.write({'col_1': 'bar', 'col_2': 2} )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE_ : str = {'col_1': pa.string(), 'col_2': pa.intaa()}
assert writer._schema == pa.schema(lowerCamelCase_ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
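# A minimal standalone version of the write/finalize round trip this test
# exercises, using the same (internal, not public) `datasets.arrow_writer` API:
#
#   import pyarrow as pa
#   from datasets.arrow_writer import ArrowWriter
#
#   stream = pa.BufferOutputStream()
#   with ArrowWriter(stream=stream) as writer:
#       writer.write({"col_1": "foo", "col_2": 1})
#       writer.write({"col_1": "bar", "col_2": 2})
#       num_examples, num_bytes = writer.finalize()
#
#   table = pa.ipc.open_stream(pa.BufferReader(stream.getvalue())).read_all()
#   assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}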
def __UpperCAmelCase ( ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = pa.BufferOutputStream()
SCREAMING_SNAKE_CASE_ : int = Features({'labels': ClassLabel(names=['neg', 'pos'] )} )
with ArrowWriter(stream=lowerCamelCase_ , features=lowerCamelCase_ ) as writer:
writer.write({'labels': 0} )
writer.write({'labels': 1} )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
SCREAMING_SNAKE_CASE_ : List[str] = pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE_ : Tuple = pa.ipc.open_stream(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : pa.Table = f.read_all()
SCREAMING_SNAKE_CASE_ : int = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(lowerCamelCase_ )
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    output = pa.BufferOutputStream()
    # `InvalidKeyError` and `DuplicatedKeysError` are assumed to be imported from
    # `datasets.keyhash` at the top of the original test module.
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)


def get_base_dtype(arr_type):
    # Recurse through (possibly nested) list types down to the primitive dtype.
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype


@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed


def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)
def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}


@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None


def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
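

# A minimal end-to-end sketch (an addition mirroring the tests above, assuming the
# same module-level imports): write two examples and read them back via Arrow IPC.
def _example_arrow_writer_roundtrip():
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    table = pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all()
    assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    assert num_examples == 2 and num_bytes > 0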
| 105 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests that carry no explicit marker as unit tests.
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # Redirect every datasets cache location into pytest's base temp directory.
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # Don't count test runs in the Hub download statistics.
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
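

# Hedged illustration (not part of the original conftest): any test run under
# these fixtures sees the redirected cache locations, so a helper like the one
# below prints a path under pytest's base temp dir, not ~/.cache/huggingface.
def _example_cache_isolation():
    import datasets.config

    print(datasets.config.HF_DATASETS_CACHE)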
| 1 | 0 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_checks():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
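

# A small sketch (an addition, assuming the same imports): map_nested applies the
# function through lists and dict values, which is what the assertions above rely
# on; without a parallel_backend context it simply runs in-process.
def _example_map_nested():
    assert map_nested(add_one, {"a": [1, 2], "b": 3}) == {"a": [2, 3], "b": 4}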
| 411 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np", )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
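

# A hedged usage sketch (not part of the test file; assumes a CUDA device and
# network access): Self-Attention Guidance is driven by the extra `sag_scale`
# argument, everything else matches the regular Stable Diffusion call signature.
def _example_sag_inference():
    pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
    pipe = pipe.to("cuda")
    image = pipe("a photo of an astronaut", sag_scale=0.75, guidance_scale=7.5).images[0]
    image.save("astronaut_sag.png")  # hypothetical output filename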
| 411 | 1 |
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
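

# Quick sanity check (an addition): F(12) = 144 is the first Fibonacci number
# with three digits, so solution(3) should be 12.
def _example_solution():
    assert solution(3) == 12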
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 414 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 414 | 1 |
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``.
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
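

# Worked example (an addition): 8051 = 83 * 97 is the classic Pollard's rho
# input; with the default seed and step the algorithm recovers one of the factors.
def _example_pollard_rho():
    divisor = pollard_rho(8051)
    assert divisor in (83, 97)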
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
args = parser.parse_args()
divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"""{args.num} is probably prime""")
else:
quotient = args.num // divisor
print(F"""{args.num} = {divisor} * {quotient}""")
| 77 |
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(top_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
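

# Quick correctness sketch (an addition): the 2x2 base case agrees with the
# hand-computed product.
def _example_base_case():
    assert default_matrix_multiplication([[1, 2], [3, 4]], [[5, 6], [7, 8]]) == [[19, 22], [43, 50]]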
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 77 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
"""configuration_audio_spectrogram_transformer""": [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ASTConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_audio_spectrogram_transformer"] = [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ASTForAudioClassification""",
"""ASTModel""",
"""ASTPreTrainedModel""",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["""ASTFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
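
# Hedged usage note (an addition): with the lazy module in place, the heavy
# torch-backed symbols are only imported when first accessed, e.g.:
#
#     from transformers import ASTConfig, ASTModel  # resolved lazily on first use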
| 199 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# Dummy placeholder classes: they stand in for the ONNX Stable Diffusion
# pipelines when the torch / transformers / onnx backends are missing, and
# raise an informative ImportError on any use.
class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
| 199 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 718 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
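
# Example invocation (an addition; the script name and paths are hypothetical):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin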
| 139 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
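

# A hedged usage sketch (an addition; requires network access to the Hub):
# outside the test suite the processor is typically loaded from a checkpoint
# and fed text queries plus an image.
def _example_owlvit_processor():
    from PIL import Image as PILImage

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = PILImage.new("RGB", (64, 64))
    inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="np")
    print(list(inputs.keys()))  # ['input_ids', 'attention_mask', 'pixel_values']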
| 77 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_pegasus_x"] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 116 | 0 |
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    # subset[i][j] is True if a subset of the first i elements sums to j
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
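

# Worked examples (an addition): 9 = 4 + 5 is reachable from the set, 30 is not.
def _example_is_sum_subset():
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False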
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311 |
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return

    # If first element is smaller than the last then swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)

        # Recursively sort last 2/3 elements
        stooge(arr, i + t, h)

        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)
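

# Quick sanity check (an addition): stooge sort runs in O(n^(log 3 / log 1.5))
# ~ O(n^2.71) time but still produces a fully sorted list.
def _example_stooge_sort():
    assert stooge_sort([18, 1, 0, 18, 0]) == [0, 0, 1, 18, 18]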
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
| 311 | 1 |