from __future__ import annotations

from collections.abc import Callable


class Heap:
    """A generic heap supporting item updates and deletions via an index map."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs the changes required for swapping two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares two heap entries by their stored values."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns the index that should be the parent among node i and its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left

        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates the given item's value if it is present in the heap."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap if it is present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change - so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> list | None:
        """Returns the top (item, value) pair from the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> list | None:
        """Returns the top (item, value) pair and removes it from the heap."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """Placeholder for doctests."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
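
# Usage sketch (editor's addition; `Heap` as defined above). With the default
# identity `key`, `_cmp` keeps larger values on top, i.e. a max-heap; pass
# e.g. key=lambda x: -x for min-heap behaviour.
#
#     h = Heap()
#     h.insert_item(5, 34)
#     h.insert_item(6, 31)
#     h.insert_item(7, 37)
#     h.get_top()      # [7, 37]
#     h.extract_top()  # [7, 37]
#     h.get_top()      # [5, 34]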
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
    import torch

    from transformers import (
        MraForMaskedLM,
        MraForMultipleChoice,
        MraForQuestionAnswering,
        MraForSequenceClassification,
        MraForTokenClassification,
        MraModel,
    )
    from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Prim's algorithm for finding a minimum spanning tree (MST) of a weighted graph."""

import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """Graph vertex with an id, a key, a parent pointer and weighted edges."""

    def __init__(self, id_) -> None:
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other) -> bool:
        """Comparison rule for the < operator."""
        return self.key < other.key

    def __repr__(self) -> str:
        """Return the vertex id."""
        return self.id

    def add_neighbor(self, vertex) -> None:
        """Add a pointer to a vertex at the neighbors list."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight) -> None:
        """Store the weight of the edge to the destination vertex."""
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge) -> None:
    """Connect vertices at 1-based positions a and b with an edge of the given weight."""
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm using a linear scan to select the minimum-key vertex."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm using a binary heap to select the minimum-key vertex."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """Placeholder for doctests."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
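
# Usage sketch (editor's addition; uses `Vertex`, `connect`, `prim` and
# `prim_heap` from above). `connect` takes 1-based vertex positions; `prim`
# returns the MST as (vertex, parent) pairs relative to the root graph[0].
#
#     graph = [Vertex(n) for n in range(5)]
#     connect(graph, 1, 2, 15)
#     connect(graph, 1, 3, 12)
#     connect(graph, 2, 4, 13)
#     connect(graph, 3, 5, 5)
#     connect(graph, 4, 5, 6)
#     prim(graph, graph[0])             # [(2, 4), (3, 1), (4, 5), (5, 3)]
#     list(prim_heap(graph, graph[0]))  # same pairs, heap-based variant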
class Graph:
    """Undirected weighted graph stored as an adjacency map."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Makes all edge weights pairwise distinct (required by Boruvka's algorithm)."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        """Returns a string representation of the graph."""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges in the graph as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from the given lists of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set structure with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)

            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Returns the minimum spanning tree of the graph using Boruvka's algorithm."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)

                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
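
# Usage sketch (editor's addition; the example graph is illustrative).
# Boruvka's algorithm assumes pairwise-distinct edge weights, which
# `distinct_weight` enforces before computing the MST.
#
#     g = Graph.build(
#         vertices=[1, 2, 3, 4],
#         edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (4, 1, 3)],
#     )
#     g.distinct_weight()
#     mst = Graph.boruvka_mst(g)
#     print(mst)  # prints the MST edges as "head -> tail == weight" lines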
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version


if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging


# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
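
# Usage sketch (editor's addition; the dataset name is illustrative). Once
# this module initialises, the re-exported top-level API is used as usual:
#
#     from datasets import load_dataset
#     ds = load_dataset("imdb", split="train")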
"""Solve a partially filled 9x9 Sudoku grid using backtracking."""

from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Checks whether digit n can be placed at (row, column) without conflicts."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Finds the next empty cell (marked 0), scanning row by row."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Fills the grid by backtracking; returns the solved grid or None."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    """Prints the grid row by row."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
"""Program to encode and decode Baconian ciphertext."""

encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encodes a word to Baconian cipher."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decodes a word from Baconian cipher."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
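
# Usage sketch (editor's addition; `encode`/`decode` as defined above):
#
#     encode("hello")                      # -> 'AABBBAABAAABABAABABAABBAB'
#     decode("AABBBAABAAABABAABABAABBAB")  # -> 'hello'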
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
'''simple docstring'''
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
    # fairseq:
    "wmt19-ru-en": {"length_penalty": 1.1},
    "wmt19-en-ru": {"length_penalty": 1.15},
    "wmt19-en-de": {"length_penalty": 1.0},
    "wmt19-de-en": {"length_penalty": 1.1},
    # allenai:
    "wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
    "wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
    "wmt16-en-de-12-1": {"length_penalty": 0.8},
    "wmt19-de-en-6-6-base": {"length_penalty": 0.6},
    "wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models

    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__A =parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path) | 407 | 1 |
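
# Usage sketch (editor's addition; the script filename and paths are
# illustrative):
#
#     python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#         --fsmt_checkpoint_path fsmt_dump/wmt19-ru-en/model.pt \
#         --pytorch_dump_folder_path converted/wmt19-ru-en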
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def lowerCamelCase__ ( _lowerCamelCase : Callable ) -> Callable:
@wraps(_lowerCamelCase )
def _inner_fn(*_lowerCamelCase : int , **_lowerCamelCase : List[Any] ):
warnings.warn(
(F'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') , _lowerCamelCase , )
return fn(*_lowerCamelCase , **_lowerCamelCase )
return _inner_fn
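
# Usage sketch (editor's addition; the decorated function is illustrative):
#
#     @experimental
#     def new_feature():
#         return 42
#
#     new_feature()  # emits a UserWarning, then returns 42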
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event produced by the distributions this object constructs."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions, i.e. the length of `event_shape`."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value that lies in the support of the distribution, used e.g. for padding."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Returns the projection layer that maps features to the distribution parameters."""
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map)
        )

    def domain_map(self, *args: torch.Tensor):
        """Maps raw projection outputs into the domain of the distribution parameters."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Maps inputs to the positive orthant: (x + sqrt(x^2 + 4)) / 2."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # Overwrites the parent class method: we cannot scale using the affine
    # transformation since the negative binomial should return integers.
    # Instead we scale the parameters.
    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
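
# Usage sketch (editor's addition; shapes are illustrative). A distribution
# head maps raw network features to the parameters of a torch Distribution:
#
#     import torch
#     head = StudentTOutput(dim=1)
#     proj = head.get_parameter_projection(in_features=32)
#     features = torch.randn(8, 32)        # (batch, in_features)
#     distr_args = proj(features)          # (df, loc, scale), each of shape (8,)
#     distr = head.distribution(distr_args)
#     sample = distr.sample()              # shape (8,)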
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    r"""
    Constructs an InstructBLIP processor which wraps a BLIP image processor, a tokenizer and a Q-Former tokenizer
    into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwrite to save the Q-Former tokenizer in a separate folder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwrite to load the Q-Former tokenizer from a separate folder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_UpperCamelCase : Tuple = logging.get_logger(__name__)
class a ( a_ ):
UpperCAmelCase_ : List[Any] =["pixel_values"]
def __init__( self , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = True , _lowerCamelCase = 1 / 2_5_5 , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = True , **_lowerCamelCase , ):
super().__init__(**_lowerCamelCase )
lowercase = size if size is not None else {'shortest_edge': 2_2_4}
lowercase = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
lowercase = crop_size if crop_size is not None else {'height': 2_5_6, 'width': 2_5_6}
lowercase = get_size_dict(_lowerCamelCase , param_name='crop_size' )
lowercase = do_resize
lowercase = size
lowercase = resample
lowercase = do_rescale
lowercase = rescale_factor
lowercase = do_center_crop
lowercase = crop_size
lowercase = do_flip_channel_order
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PIL.Image.BILINEAR , _lowerCamelCase = None , **_lowerCamelCase , ):
lowercase = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}' )
lowercase = get_resize_output_image_size(_lowerCamelCase , size=size['shortest_edge'] , default_to_square=_lowerCamelCase )
return resize(_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
lowercase = get_size_dict(_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(_lowerCamelCase , size=(size['height'], size['width']) , data_format=_lowerCamelCase , **_lowerCamelCase )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
return rescale(_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ):
return flip_channel_order(_lowerCamelCase , data_format=_lowerCamelCase )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ):
lowercase = do_resize if do_resize is not None else self.do_resize
lowercase = resample if resample is not None else self.resample
lowercase = do_rescale if do_rescale is not None else self.do_rescale
lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
lowercase = size if size is not None else self.size
lowercase = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
lowercase = crop_size if crop_size is not None else self.crop_size
lowercase = get_size_dict(_lowerCamelCase , param_name='crop_size' )
lowercase = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
# All transformations expect numpy arrays.
lowercase = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
lowercase = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images]
if do_center_crop:
lowercase = [self.center_crop(image=_lowerCamelCase , size=_lowerCamelCase ) for image in images]
if do_rescale:
lowercase = [self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
lowercase = [self.flip_channel_order(image=_lowerCamelCase ) for image in images]
lowercase = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images]
lowercase = {'pixel_values': images}
return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ):
lowercase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_lowerCamelCase ):
lowercase = target_sizes.numpy()
lowercase = []
for idx in range(len(_lowerCamelCase ) ):
lowercase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_lowerCamelCase )
lowercase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowerCamelCase )
else:
lowercase = logits.argmax(dim=1 )
lowercase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
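# Illustrative usage of the processor above (the image path is a placeholder):
#
#     from PIL import Image
#     processor = MobileViTImageProcessor()
#     batch = processor(images=Image.open("example.png"), return_tensors="pt")
#     # batch["pixel_values"] is a float tensor of shape (1, 3, 256, 256) given
#     # the default 256x256 center crop, with channels flipped to BGR.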
"""simple docstring"""
import math
import unittest
def _SCREAMING_SNAKE_CASE ( __snake_case : int ):
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
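# Why stepping by 6 is enough: any integer can be written as 6k + r with
# r in {0, 1, 2, 3, 4, 5}. The cases r in {0, 2, 3, 4} are divisible by 2 or 3
# and were already ruled out above, leaving only 6k + 1 and 6k + 5 (= 6(k+1) - 1)
# as possible prime divisors.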
class PrimeTest(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
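# Illustrative use of the re-exported pipeline (the checkpoint name is an
# assumption, not defined in this file):
#
#     from diffusers import VQDiffusionPipeline
#
#     pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#     image = pipe("a teddy bear playing in the pool").images[0]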
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
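# The mapping above only lists symbol names per submodule; _LazyModule replaces
# this module in sys.modules so the heavy torch-backed imports run on first
# attribute access rather than at package import time. Illustrative:
#
#     from transformers.models.xlm_roberta_xl import XLMRobertaXLConfig  # triggers the real import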
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : int = 1
snake_case_ : List[str] = 2
while i * i <= n:
snake_case_ : int = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
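# Illustrative check: 28 = 2^2 * 7, so count_divisors(28) == (2 + 1) * (1 + 1) == 6
# (its divisors are 1, 2, 4, 7, 14 and 28).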
def solution() -> int:
    """Returns the first triangle number to have more than five hundred divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
"""simple docstring"""
from copy import deepcopy
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ = None , lowercase__ = None ):
if arr is None and size is not None:
snake_case_ : str = size
snake_case_ : Optional[Any] = [0] * size
elif arr is not None:
self.init(lowercase__ )
else:
raise ValueError("""Either arr or size must be specified""" )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : Optional[Any] = len(lowercase__ )
snake_case_ : int = deepcopy(lowercase__ )
for i in range(1 , self.size ):
snake_case_ : Optional[Any] = self.next_(lowercase__ )
if j < self.size:
self.tree[j] += self.tree[i]
def __UpperCamelCase (self ):
snake_case_ : Dict = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
snake_case_ : Optional[int] = self.next_(lowercase__ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def __UpperCamelCase (lowercase__ ):
return index + (index & (-index))
@staticmethod
def __UpperCamelCase (lowercase__ ):
return index - (index & (-index))
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
snake_case_ : Tuple = self.next_(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
self.add(lowercase__ , value - self.get(lowercase__ ) )
def __UpperCamelCase (self , lowercase__ ):
if right == 0:
return 0
snake_case_ : List[str] = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
snake_case_ : Optional[int] = self.prev(lowercase__ )
return result
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
return self.prefix(lowercase__ ) - self.prefix(lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
return self.query(lowercase__ , index + 1 )
def __UpperCamelCase (self , lowercase__ ):
value -= self.tree[0]
if value < 0:
return -1
snake_case_ : Tuple = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
snake_case_ : Tuple = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
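# Usage sketch (illustrative values):
#     bit = BinaryIndexedTree([1, 2, 3, 4, 5])
#     bit.prefix(3)    # -> 6, the sum of the first three values
#     bit.add(1, 10)   # underlying array becomes [1, 12, 3, 4, 5]
#     bit.query(1, 3)  # -> 15 (12 + 3)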
if __name__ == "__main__":
import doctest
doctest.testmod()
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
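# Deprecation-shim pattern: the legacy FeatureExtractor name simply subclasses
# the new ImageProcessor unchanged, so old imports keep working while a
# FutureWarning is raised on instantiation.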
import pytest

import datasets


# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
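# Note: the `monkeypatch` fixture is function-scoped, so the autouse cache
# redirection above is re-applied for every single test, while the
# session-scoped `disable_tqdm_output` runs only once per test session.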
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    """Discrete codebook lookup layer used by VQ models."""

    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
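# Note on VectorQuantizer.forward above: `z_q = z + (z_q - z).detach()` is the
# straight-through estimator -- the forward pass uses the quantised values,
# while gradients flow back to the encoder as if quantisation were the
# identity, bypassing the non-differentiable argmin lookup.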
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
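# Sketch of how these pieces typically compose inside an autoencoder (assumed
# wiring, not defined in this file): with double_z=True the Encoder emits
# 2 * latent_channels feature maps, which DiagonalGaussianDistribution splits
# into mean and logvar for a reparameterised sample fed to the Decoder.
#
#     moments = encoder(pixel_values)                   # (B, 2C, H', W')
#     posterior = DiagonalGaussianDistribution(moments)
#     z = posterior.sample()                            # mean + std * eps
#     kl_loss = posterior.kl().mean()
#     reconstruction = decoder(z)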
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Tuple = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple ='''glpn'''
def __init__( self: Dict , __a: List[str]=3 , __a: Optional[int]=4 , __a: Dict=[2, 2, 2, 2] , __a: str=[8, 4, 2, 1] , __a: Optional[int]=[32, 64, 160, 256] , __a: Dict=[7, 3, 3, 3] , __a: Dict=[4, 2, 2, 2] , __a: Optional[Any]=[1, 2, 5, 8] , __a: Tuple=[4, 4, 4, 4] , __a: int="gelu" , __a: Union[str, Any]=0.0 , __a: str=0.0 , __a: Union[str, Any]=0.02 , __a: str=0.1 , __a: Union[str, Any]=1e-6 , __a: Any=64 , __a: Dict=10 , __a: Union[str, Any]=-1 , **__a: Optional[Any] , )-> Dict:
super().__init__(**__a )
lowerCamelCase : Dict = num_channels
lowerCamelCase : Any = num_encoder_blocks
lowerCamelCase : Dict = depths
lowerCamelCase : List[str] = sr_ratios
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : Tuple = patch_sizes
lowerCamelCase : Optional[int] = strides
lowerCamelCase : Optional[Any] = mlp_ratios
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : Any = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : List[Any] = initializer_range
lowerCamelCase : Dict = drop_path_rate
lowerCamelCase : Any = layer_norm_eps
lowerCamelCase : Optional[Any] = decoder_hidden_size
lowerCamelCase : Tuple = max_depth
lowerCamelCase : Optional[Any] = head_in_index
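# Illustrative: a randomly initialised model can be built straight from this
# config (GLPNModel is assumed available from the same package):
#
#     config = GLPNConfig(depths=[2, 2, 2, 2], hidden_sizes=[32, 64, 160, 256])
#     model = GLPNModel(config)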
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    # Scalar fingerprint of the weights, used to tell whether a checkpoint round-trip restored them.
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)

    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)

    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)
    def test_env_var_device(self):
        """Tests that setting the torch device with ACCELERATE_TORCH_DEVICE overrides the default."""
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")
    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)
            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)
    def test_accelerator_none(self):
        """Just make sure nothing breaks if None is passed through `prepare`."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)

    def test_is_accelerator_prepared(self):
        """Checks that `_is_accelerate_prepared` is set properly on prepared objects."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should have `_is_accelerate_prepared` set to `False`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        """Tests that the accelerator can be used with an 8-bit quantised model."""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """Preparing an 8-bit model split between CPU and GPU should fail."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            device_map=device_map,
            load_in_8bit=True,
            llm_int8_enable_fp32_cpu_offload=True,
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        """Preparing an 8-bit model spread over several GPUs under MULTI_GPU should fail."""
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        """Preparing an 8-bit model on several GPUs without a distributed setup should work."""
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(model)
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
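# `ratio_char_token` (characters per token) is a rough compression measure:
# higher values mean each token covers more source text, i.e. the tokenizer
# fits the corpus better.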
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging


if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
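# With the ImageNet *standard* statistics above (mean = std = 0.5 per channel),
# normalize() maps rescaled [0, 1] pixel values linearly onto [-1, 1]:
# (x - 0.5) / 0.5 == 2x - 1.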
import sys

N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval(s: str) -> int:
    """Returns the product of the digits of the string `s`."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product
def solution(n: str = N) -> int:
    """Finds the thirteen adjacent digits of `n` with the greatest product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
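# Cross-check sketch: a straightforward scan over every 13-digit window gives
# the same answer (illustrative, O(len(n) * 13)):
#
#     def brute_force(n: str = N) -> int:
#         return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))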
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
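# With the structure above, imports from this package resolve lazily: e.g.
# `from transformers.models.efficientformer import EfficientFormerConfig` only
# triggers the import of the configuration submodule on first attribute access.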
| 74 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 74 | 1 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
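# Usage sketch, assuming the standard `transformers-cli` entry point and the
# flags registered above:
#     transformers-cli download bert-base-uncased --cache-dir /tmp/models
# downloads both the model weights and the tokenizer into the given cache dir.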
| 713 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'''
),
},
'''tokenizer_config_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'''
),
},
'''merges_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'''
),
},
}
BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a sequence of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
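# For example, get_pairs(("h", "e", "llo</w>")) returns
# {("h", "e"), ("e", "llo</w>")} -- the candidate merges inspected by `bpe` below.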
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 10_24}


class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the highest-ranked (lowest index) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
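# A minimal usage sketch (assuming the checkpoint listed above is reachable on the Hub):
#     tokenizer = Speech2Text2Tokenizer.from_pretrained("facebook/s2t-wav2vec2-large-en-de")
#     text = tokenizer.decode(token_ids)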
| 263 | 0 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 5 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 274 | 0 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1_004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18_536, 2_260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)

        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_UpperCAmelCase = {"input_ids": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase,  # the literal dict defined above
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
| 129 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"
@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
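# A usage sketch of the helper under test (call shape assumed from the tests above):
#     student, *_ = create_student_by_copying_alternating_layers(
#         "sshleifer/bart-tiny-random", tempfile.mkdtemp(), e=1, d=1
#     )
#     # `student` is a 1-encoder-layer / 1-decoder-layer copy of the teacher.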
| 129 | 1 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(node: TreeNode | None) -> bool:
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(node):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(node, -float("inf"), float("inf"))
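# Usage sketch:
#     root = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
#     is_binary_search_tree(root)  # True
#     is_binary_search_tree(TreeNode(2.0, TreeNode(5.0)))  # False: left child too large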
if __name__ == "__main__":
import doctest
doctest.testmod()
| 55 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06], std=[0.2_29, 0.2_24, 0.2_25]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.21_66, -0.43_68, 0.21_91])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.16_69, 0.01_25, -0.16_95])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.49_17, -0.04_30, 0.13_41])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.25_88, -0.53_42, -0.23_31])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.16_55, -0.40_90, -0.17_30])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.53_06, -0.04_83, -0.39_28])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
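# Example invocation (file name assumed for illustration):
#     python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#         --pytorch_dump_folder_path ./focalnet-tiny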
| 89 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 364 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"tokenizer_file": {
"bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
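# A minimal usage sketch (assuming Hub access):
#     tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#     ids = tokenizer("Hello world").input_ids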
| 364 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/bigbird-roberta-base": 4_096,
"google/bigbird-roberta-large": 4_096,
"google/bigbird-base-trivia-itc": 4_096,
}
SPIECE_UNDERLINE = "▁"


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 27 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False
    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
| 27 | 1 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73), (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
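# As a quick illustration of the renaming rules above (the key below is a
# hypothetical example for demonstration, not taken from an actual BLIP checkpoint):
#
#     rename_key("visual_encoder.blocks.0.attn.proj.weight")
#     # -> "vision_model.encoder.layers.0.self_attn.projection.weight"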
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"

    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
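# A minimal invocation sketch (the script file name and paths are placeholders,
# assuming the BLIP repo is cloned next to this script as noted in the imports):
#
#     python convert_blip_original_pytorch_to_hf.py \
#         --pytorch_dump_folder_path ./blip-converted \
#         --config_path ./blip_config.json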
| 717 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 607 | 0 |
import random
import unittest
import numpy as np
from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
    import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 68 |
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    # Sliding window: start from the first k elements, then slide one step at a
    # time, dropping the leftmost element and adding the next one on the right.
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array, k)}")
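# A deterministic sanity check to complement the randomized demo above
# (hand-computed: the window 4 + 2 + 10 + 23 = 39 is the maximum):
#
#     max_sum_in_array([1, 4, 2, 10, 23, 3, 1, 0, 20], 4)  # -> 39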
| 243 | 0 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Iterate through the list from both ends to find the index of key.

    >>> search([1, 3, 5, 7, 9], 7)
    3
    >>> search([1, 3, 5, 7, 9], 4)
    -1
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 708 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
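# A minimal usage sketch for the processor above. The checkpoint id and image
# source are illustrative assumptions, not taken from this file:
#
#     from transformers import OwlViTProcessor
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(
#         text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt"
#     )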
| 433 | 0 |
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    # Depth-first backtracking: once the running sequence is as long as the
    # input, it is a complete permutation; otherwise extend it with each
    # still-unused element in turn.
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
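# A tiny deterministic check of the backtracking above: with two items the
# state space tree has 2! leaves, so this prints [1, 2] and then [2, 1].
generate_all_permutations([1, 2])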
| 587 |
def solution(limit: int = 1000000) -> int:
    """
    Returns the sum of Euler's totient function phi(n) for 2 <= n <= limit,
    computed with a prime sieve and the product formula phi(n) = n * prod(1 - 1/p).
    """
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
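# A small hand-checkable case (phi(2) + ... + phi(10) = 1+2+2+4+2+6+4+6+4 = 31);
# the final int() truncates the float accumulation, which should absorb the
# tiny rounding error at this scale:
#
#     solution(10)  # expected: 31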
| 315 | 0 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )

    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
    main(args)
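# A hypothetical invocation (the script file name and output location below are
# placeholders; the flags match the parser defined above):
#
#     python prepare_tfrecord_shards.py \
#         --dataset_name wikitext --dataset_config wikitext-103-raw-v1 \
#         --split train --shard_size 1000 --max_length 512 --output_dir gs://tf-tpu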
| 703 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_detectron2,
    require_pytesseract,
    require_tf,
    require_torch,
    require_vision,
    slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image

    from transformers.image_utils import load_image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(_):
    return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
UpperCAmelCase_ : Optional[Any] = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 440 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 169 |
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args, **kwargs):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _a , _a , _a , unittest.TestCase ):
_a = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
_a = (BertGenerationDecoder,) if is_torch_available() else ()
_a = (
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
def __lowercase ( self : Optional[Any] ):
lowerCAmelCase = BertGenerationEncoderTester(self )
lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37 )
def __lowercase ( self : Dict ):
self.config_tester.run_common_tests()
def __lowercase ( self : Tuple ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def __lowercase ( self : List[str] ):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase = """bert"""
self.model_tester.create_and_check_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def __lowercase ( self : str ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCAmelCase )
def __lowercase ( self : List[str] ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCAmelCase )
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 169 | 1 |
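A minimal sketch (not part of the test file above) of the cache-equivalence property that create_and_check_decoder_model_past_large_inputs verifies: decoding with past_key_values must reproduce the corresponding slice of a full forward pass. The tiny config values are illustrative only.

import torch
from transformers import BertGenerationConfig, BertGenerationDecoder

config = BertGenerationConfig(
    vocab_size=50, hidden_size=32, num_hidden_layers=2, num_attention_heads=4,
    intermediate_size=37, is_decoder=True,
)
model = BertGenerationDecoder(config).eval()
input_ids = torch.randint(0, 50, (1, 8))
with torch.no_grad():
    full_logits = model(input_ids).logits
    past = model(input_ids[:, :-1], use_cache=True).past_key_values
    step_logits = model(input_ids[:, -1:], past_key_values=past).logits
# The last position of the full pass matches the single cached step.
assert torch.allclose(full_logits[:, -1], step_logits[:, 0], atol=1e-4)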
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        layer_norm_eps=1e-5,
        initializer_range=0.02,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError is raised for an unknown stage name
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def __A ( self : Optional[int] ) -> List[Any]:
pass
def __A ( self : Optional[int] ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __A ( self : Any ) -> Optional[int]:
return
def __A ( self : Optional[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __A ( self : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__magic_name__ )
@unittest.skip("Swin does not use inputs_embeds" )
def __A ( self : Optional[int] ) -> str:
pass
@unittest.skip("Swin does not support feedforward chunking" )
def __A ( self : Union[str, Any] ) -> str:
pass
def __A ( self : str ) -> Tuple:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def __A ( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __magic_name__ )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def __A ( self : str ) -> int:
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def __A ( self : Optional[Any] ) -> Union[str, Any]:
pass
def __A ( self : Dict , __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : int ) -> Tuple:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
SCREAMING_SNAKE_CASE_ = outputs.hidden_states
SCREAMING_SNAKE_CASE_ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
# Swin has a different seq_length
SCREAMING_SNAKE_CASE_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __A ( self : Dict ) -> str:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def __A ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ = True
self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def __A ( self : Tuple ) -> Optional[int]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def __A ( self : str ) -> Tuple:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def __A ( self : int ) -> Any:
pass
def __A ( self : Any ) -> Any:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(__magic_name__ : Dict ):
SCREAMING_SNAKE_CASE_ = 0
return t
def check_equivalence(__magic_name__ : List[str] , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[str]={} ):
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ , return_dict=__magic_name__ , **__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(**__magic_name__ , return_dict=__magic_name__ , **__magic_name__ ).to_tuple()
def recursive_check(__magic_name__ : Optional[Any] , __magic_name__ : Any ):
if isinstance(__magic_name__ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__magic_name__ , __magic_name__ ):
recursive_check(__magic_name__ , __magic_name__ )
elif isinstance(__magic_name__ , __magic_name__ ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(__magic_name__ , __magic_name__ )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(__magic_name__ ) , set_nan_tensor_to_zero(__magic_name__ ) , atol=1e-5 ) , msg=(
"Tuple and dict output are not equal. Difference:"
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
F''' {torch.isnan(__magic_name__ ).any()} and `inf`: {torch.isinf(__magic_name__ )}. Dict has'''
F''' `nan`: {torch.isnan(__magic_name__ ).any()} and `inf`: {torch.isinf(__magic_name__ )}.'''
) , )
recursive_check(__magic_name__ , __magic_name__ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ )
check_equivalence(__magic_name__ , __magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
check_equivalence(__magic_name__ , __magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ )
check_equivalence(__magic_name__ , __magic_name__ , __magic_name__ , {"output_hidden_states": True} )
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
check_equivalence(__magic_name__ , __magic_name__ , __magic_name__ , {"output_hidden_states": True} )
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
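The tuple/dict equivalence test earlier in this file walks a ModelOutput and its to_tuple() form field by field, zeroing NaNs first so that NaN != NaN cannot cause a spurious mismatch. A self-contained sketch of that recursion (illustrative, not the test suite's exact helper):

import torch

def outputs_match(a, b, atol=1e-5):
    if isinstance(a, (list, tuple)):
        return all(outputs_match(x, y, atol) for x, y in zip(a, b))
    if a is None or b is None:
        return a is None and b is None
    # Replace NaNs before comparing, mirroring set_nan_tensor_to_zero above.
    a = torch.nan_to_num(a.clone(), nan=0.0)
    b = torch.nan_to_num(b.clone(), nan=0.0)
    return torch.allclose(a, b, atol=atol)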
| 356 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 356 | 1 |
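The __init__ above defers heavy imports through transformers' _LazyModule. The same idea in miniature, using module-level __getattr__ (PEP 562); names here are illustrative:

import importlib

_import_structure = {"tokenization_bartpho": ["BartphoTokenizer"]}

def __getattr__(name):
    # Import the owning submodule only when one of its symbols is first accessed.
    for submodule, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module("." + submodule, __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")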
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''artists_file''': '''artists.json''',
'''lyrics_file''': '''lyrics.json''',
'''genres_file''': '''genres.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''artists_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json''',
},
'''genres_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json''',
},
'''lyrics_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json''',
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)
        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)
        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")

        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
@property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}
def lowercase__ ( self : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] ):
lowerCamelCase_ = [self.artists_encoder.get(__UpperCamelCase , 0 ) for artist in list_artists]
for genres in range(len(__UpperCamelCase ) ):
lowerCamelCase_ = [self.genres_encoder.get(__UpperCamelCase , 0 ) for genre in list_genres[genres]]
lowerCamelCase_ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
lowerCamelCase_ = [[self.lyrics_encoder.get(__UpperCamelCase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def lowercase__ ( self : Tuple , __UpperCamelCase : Optional[int] ):
return list(__UpperCamelCase )
def lowercase__ ( self : str , __UpperCamelCase : List[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] , **__UpperCamelCase : Any ):
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self.prepare_for_tokenization(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = self._tokenize(__UpperCamelCase )
return artist, genre, lyrics
def lowercase__ ( self : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : bool = False ):
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
lowerCamelCase_ = artists[idx].lower()
lowerCamelCase_ = [genres[idx].lower()]
else:
lowerCamelCase_ = self._normalize(artists[idx] ) + """.v2"""
lowerCamelCase_ = [
self._normalize(__UpperCamelCase ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
lowerCamelCase_ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
lowerCamelCase_ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
lowerCamelCase_ = {vocab[index]: index + 1 for index in range(len(__UpperCamelCase ) )}
lowerCamelCase_ = 0
lowerCamelCase_ = len(__UpperCamelCase ) + 1
lowerCamelCase_ = self.vocab
lowerCamelCase_ = {v: k for k, v in self.vocab.items()}
lowerCamelCase_ = """"""
else:
lowerCamelCase_ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
lowerCamelCase_ = self._run_strip_accents(__UpperCamelCase )
lowerCamelCase_ = lyrics.replace("""\\""" , """\n""" )
lowerCamelCase_ = self.out_of_vocab.sub("""""" , __UpperCamelCase ), [], []
return artists, genres, lyrics
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : List[Any] ):
lowerCamelCase_ = unicodedata.normalize("""NFD""" , __UpperCamelCase )
lowerCamelCase_ = []
for char in text:
lowerCamelCase_ = unicodedata.category(__UpperCamelCase )
if cat == "Mn":
continue
output.append(__UpperCamelCase )
return "".join(__UpperCamelCase )
def lowercase__ ( self : List[str] , __UpperCamelCase : str ):
lowerCamelCase_ = (
[chr(__UpperCamelCase ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
+ [chr(__UpperCamelCase ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
+ [chr(__UpperCamelCase ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
lowerCamelCase_ = frozenset(__UpperCamelCase )
lowerCamelCase_ = re.compile(R"""_+""" )
lowerCamelCase_ = """""".join([c if c in accepted else """_""" for c in text.lower()] )
lowerCamelCase_ = pattern.sub("""_""" , __UpperCamelCase ).strip("""_""" )
return text
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : List[str] ):
return " ".join(__UpperCamelCase )
def lowercase__ ( self : List[str] , __UpperCamelCase : int , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : bool = False ):
# Convert to TensorType
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
lowerCamelCase_ = TensorType(__UpperCamelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
lowerCamelCase_ = tf.constant
lowerCamelCase_ = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
lowerCamelCase_ = torch.tensor
lowerCamelCase_ = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
lowerCamelCase_ = jnp.array
lowerCamelCase_ = _is_jax
else:
lowerCamelCase_ = np.asarray
lowerCamelCase_ = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
lowerCamelCase_ = [inputs]
if not is_tensor(__UpperCamelCase ):
lowerCamelCase_ = as_tensor(__UpperCamelCase )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__( self : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any]="" , __UpperCamelCase : List[Any]="pt" ):
lowerCamelCase_ = [0, 0, 0]
lowerCamelCase_ = [artist] * len(self.version )
lowerCamelCase_ = [genres] * len(self.version )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self.tokenize(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._convert_token_to_id(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ = [-INFINITY] * len(full_tokens[-1] )
lowerCamelCase_ = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=__UpperCamelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ):
if not os.path.isdir(__UpperCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=__UpperCamelCase ) )
lowerCamelCase_ = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=__UpperCamelCase ) )
lowerCamelCase_ = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=__UpperCamelCase ) )
return (artists_file, genres_file, lyrics_file)
def lowercase__ ( self : List[Any] , __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : Dict ):
lowerCamelCase_ = self.artists_decoder.get(__UpperCamelCase )
lowerCamelCase_ = [self.genres_decoder.get(__UpperCamelCase ) for genre in genres_index]
lowerCamelCase_ = [self.lyrics_decoder.get(__UpperCamelCase ) for character in lyric_index]
return artist, genres, lyrics
| 272 |
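A standalone sketch of the accent-stripping step used by _run_strip_accents in the tokenizer above: NFD-decompose the string, then drop combining marks (Unicode category "Mn"):

import unicodedata

def strip_accents(text):
    # Decompose accented characters, then drop the combining-mark code points.
    return "".join(
        ch for ch in unicodedata.normalize("NFD", text) if unicodedata.category(ch) != "Mn"
    )

assert strip_accents("Beyoncé") == "Beyonce"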
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'''torch''',
'''numpy''',
'''tokenizers''',
'''filelock''',
'''requests''',
'''tqdm''',
'''regex''',
'''sentencepiece''',
'''sacremoses''',
'''importlib_metadata''',
'''huggingface_hub''',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 272 | 1 |
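The hub entry points above reuse each Auto class's docstring via add_start_docstrings. A minimal sketch of such a docstring-prepending decorator (illustrative, not transformers' exact implementation):

def add_docstring(*docstr):
    def decorator(fn):
        # Prepend the shared text to whatever docstring the function already has.
        fn.__doc__ = "".join(docstr) + (fn.__doc__ or "")
        return fn
    return decorator

@add_docstring("Shared preamble. ")
def load(name):
    """Loads a checkpoint by name."""
    return name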
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )
        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 715 |
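The Lilt tester above repairs random bounding boxes element-wise so every (x0, y0, x1, y1) satisfies x0 <= x1 and y0 <= y1. A vectorised sketch of the same normalisation (illustrative helper, not part of the test file):

import torch

def make_boxes_legal(bbox):
    # Sort each coordinate pair so the smaller value comes first.
    x0 = torch.minimum(bbox[..., 0], bbox[..., 2])
    y0 = torch.minimum(bbox[..., 1], bbox[..., 3])
    x1 = torch.maximum(bbox[..., 0], bbox[..., 2])
    y1 = torch.maximum(bbox[..., 1], bbox[..., 3])
    return torch.stack([x0, y0, x1, y1], dim=-1)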
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_layoutlmv3""": [
"""LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LayoutLMv3Config""",
"""LayoutLMv3OnnxConfig""",
],
"""processing_layoutlmv3""": ["""LayoutLMv3Processor"""],
"""tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
"""LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv3ForQuestionAnswering""",
"""LayoutLMv3ForSequenceClassification""",
"""LayoutLMv3ForTokenClassification""",
"""LayoutLMv3Model""",
"""LayoutLMv3PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
"""TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLayoutLMv3ForQuestionAnswering""",
"""TFLayoutLMv3ForSequenceClassification""",
"""TFLayoutLMv3ForTokenClassification""",
"""TFLayoutLMv3Model""",
"""TFLayoutLMv3PreTrainedModel""",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 400 | 0 |
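The try/except blocks above let the package degrade gracefully when an optional backend is missing. The same guard pattern in miniature (illustrative):

try:
    import torch  # noqa: F401
    _torch_available = True
except ImportError:
    _torch_available = False

# Torch-backed symbols are only registered when the backend imports cleanly.
symbols = ["LayoutLMv3Model"] if _torch_available else []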
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
| 348 |
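A quick usage check for the two properties above: head_dim splits hidden_size evenly across attention heads, and rotary position embeddings are used exactly when ALiBi is disabled (the values shown match the 7B-style defaults above):

config = FalconConfig(hidden_size=4544, num_attention_heads=71, alibi=False)
assert config.head_dim == 64  # 4544 / 71
assert config.rotary is True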
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 60 | 0 |
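A hand check of the docstring example above: with predictions [2.5, 0.0, 2, 8] and references [3, -0.5, 2, 7], the squared errors are 0.25, 0.25, 0.0 and 1.0, so the mean is 1.5 / 4 = 0.375 and the RMSE is sqrt(0.375) ≈ 0.6123724.

errors = [(p - r) ** 2 for p, r in zip([2.5, 0.0, 2, 8], [3, -0.5, 2, 7])]
assert sum(errors) / len(errors) == 0.375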
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 614 |
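At inference time the highway exits above gate on prediction entropy: if an intermediate classifier is already confident (entropy below a threshold), DeeBERT-style models raise HighwayException and stop early. A minimal sketch of that decision rule; the threshold value is illustrative:

import torch

def should_exit(logits, threshold=0.5):
    # Low entropy means a peaked, confident distribution, so we can exit early.
    probs = torch.softmax(logits, dim=-1)
    ent = -(probs * torch.log(probs + 1e-12)).sum(dim=-1)
    return bool(ent.mean() < threshold)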
"""simple docstring"""
def get_min_or_max(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return min_val if option is True, else max_val."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Return the integer average of the two numbers."""
    return int((number_1 + number_2) / 2)
def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Bisect the range [lower, higher] until to_guess is found."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")
def main() -> None:
    """Collect user input and run the search."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
| 614 | 1 |
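A worked trace of the search above: guessing 3 inside [0, 1000] visits the midpoints 500, 250, 125, 62, 31, 15, 7 and finally 3, i.e. O(log n) guesses:

lo, hi, target, trail = 0, 1000, 3, []
while True:
    mid = int((lo + hi) / 2)
    trail.append(mid)
    if mid > target:
        hi = mid
    elif mid < target:
        lo = mid
    else:
        break
assert trail == [500, 250, 125, 62, 31, 15, 7, 3]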
"""simple docstring"""
from math import factorial
def __snake_case ( UpperCamelCase__ = 100 ) -> int:
"""simple docstring"""
return sum(int(UpperCamelCase__ ) for x in str(factorial(UpperCamelCase__ ) ) )
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 690 |
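A quick check of the function above on a small input: 10! = 3628800, whose digit sum is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.

from math import factorial

assert sum(int(d) for d in str(factorial(10))) == 27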
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_flax_weights(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)


@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def test_stable_diffusion_tiny_checkpoint(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))

        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2383808.2) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety_checker(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2347693.5) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
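
# The replicate()/shard() calls in the tests above are the standard pmap
# data-parallel recipe: parameters are copied to every device while the batch
# and the RNG keys are split across devices. A stripped-down sketch of the same
# pattern, independent of diffusers (the toy matmul stands in for the pipeline):
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

toy_params = {"w": jnp.ones((4,))}  # a single copy of the weights
toy_batch = jnp.ones((jax.device_count() * 2, 4))  # leading axis divisible by device count

p_params = replicate(toy_params)  # -> one copy per device
p_batch = shard(toy_batch)  # -> (num_devices, per_device_batch, ...)


@jax.pmap
def apply(device_params, x):
    return x @ device_params["w"]


print(apply(p_params, p_batch).shape)  # (num_devices, 2)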
| 690 | 1 |
class Graph:
    def __init__(self):
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
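
# The recursive traversal above can exhaust the call stack on deep graphs. An
# equivalent iterative version with an explicit stack, using the same adjacency
# dict shape as Graph.vertex (a sketch for illustration):
def dfs_iterative(graph: dict, start: int) -> list:
    visited, order = set(), []
    stack = [start]
    while stack:
        vertex = stack.pop()
        if vertex in visited:
            continue
        visited.add(vertex)
        order.append(vertex)
        # push neighbours in reverse so they are visited in insertion order
        for neighbour in reversed(graph.get(vertex, [])):
            if neighbour not in visited:
                stack.append(neighbour)
    return order


print(dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0))  # [0, 1, 2, 3]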
| 716 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
# NOTE: the original flag name was lost to obfuscation; disabling TF32 matmuls is
# the usual module-level setting in these diffusers test files and is assumed here.
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 643 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
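
# The module above defers heavy imports until an attribute is actually accessed.
# A minimal module-level equivalent built on PEP 562's __getattr__; this is a
# generic sketch, not the actual transformers._LazyModule implementation, and
# would live in its own module rather than alongside the code above:
#
#   import importlib
#
#   _lazy_structure = {"math": ["sqrt"], "json": ["dumps"]}
#   _attr_to_module = {attr: mod for mod, attrs in _lazy_structure.items() for attr in attrs}
#
#   def __getattr__(name):
#       # Import on first access only, then cache the attribute in this module.
#       if name in _attr_to_module:
#           value = getattr(importlib.import_module(_attr_to_module[name]), name)
#           globals()[name] = value
#           return value
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")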
| 303 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Binary search for the index of the first negative number in a decreasing row."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
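
# Worked example: for the first 4x4 test grid above, the answer is 8 (its rows
# contribute 1, 1, 2 and 4 negatives). Because rows and columns are both sorted
# decreasingly, there is also a classic O(n + m) "staircase" walk, shown here
# as an alternative sketch to the binary-search version:
def count_negatives_staircase(grid: list[list[int]]) -> int:
    # Walk from the bottom-left corner: move up while the entry is negative,
    # move right otherwise; everything below the staircase is negative.
    rows, cols = len(grid), len(grid[0])
    row, total = rows - 1, 0
    for col in range(cols):
        while row >= 0 and grid[row][col] < 0:
            row -= 1
        total += rows - 1 - row
    return total


assert count_negatives_staircase([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]) == 8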
| 377 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after resizing with ConditionalDetrImageProcessor."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 298 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 298 | 1 |
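# The order of operations in the preprocess() method above matters: resize to
# the short edge, center-crop to a square, rescale to [0, 1], then normalize
# with the CLIP statistics. A quick usage sketch with the defaults; it assumes
# the class above is importable as CLIPImageProcessor:
import numpy as np
from PIL import Image

processor = CLIPImageProcessor()  # 224 short edge, 224x224 center crop
image = Image.fromarray(np.random.randint(0, 255, (300, 400, 3), dtype=np.uint8))
batch = processor.preprocess(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)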
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
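
# The from_dict test above relies on size normalization: an int becomes a
# square, and a legacy (width, height) tuple is flipped into the
# {"height", "width"} dict. The conversion logic is roughly the following
# sketch (not the exact transformers helper):
def normalize_size(size) -> dict:
    if isinstance(size, int):
        return {"height": size, "width": size}
    if isinstance(size, (tuple, list)):  # legacy (width, height) order
        width, height = size
        return {"height": height, "width": width}
    return dict(size)


assert normalize_size(42) == {"height": 42, "width": 42}
assert normalize_size((42, 84)) == {"height": 84, "width": 42}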
| 196 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_squeezebert_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_squeezebert_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_squeezebert_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_squeezebert_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_squeezebert_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 295 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 712 |
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references, i.e. the height of this node."""
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)
    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]
    def random_level(self) -> int:
        # Draw a level from a geometric distribution: each extra level is kept
        # with probability p, capped at max_level.
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        # Nodes that refer, or should refer, to the searched node.
        update_vector = []

        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def delete(self, key: KT) -> None:
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]
    def insert(self, key: KT, value: VT) -> None:
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node
    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
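# Note (added): with p = 0.5 and max_level = 16, search/insert/delete all run
# in expected O(log n) time. The test functions below exercise these
# operations repeatedly to cover the randomised level choices.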
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None
def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13
def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0
def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None
def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None
def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()
def main():
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
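
# --- illustrative usage sketch (added; not part of the original module) ---
# The keys and values below are arbitrary examples of the SkipList API.
def _demo_skip_list() -> None:
    demo: SkipList[str, int] = SkipList()
    demo.insert("alpha", 1)
    demo.insert("beta", 2)
    demo.insert("alpha", 10)  # re-inserting a key overrides its value
    assert demo.find("alpha") == 10
    demo.delete("beta")
    assert demo.find("beta") is None
    assert list(demo) == sorted(list(demo))  # iteration yields keys in order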
| 38 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
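
# Illustrative usage sketch (added): "harmonai/maestro-150k" is one publicly
# available checkpoint for this pipeline; treat the model id and argument
# values as examples rather than part of this file.
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
#   waveform = output.audios[0]  # numpy array of shape (channels, samples)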
| 138 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_sde_ve_pipeline(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
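
# Note (added): enable_full_determinism() at the top of this file seeds and
# configures torch/cuDNN for deterministic kernels, which is what makes exact
# slice checks like expected_slice reproducible across runs on one machine.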
| 138 | 1 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : str = logging.get_logger(__name__)
set_seed(770)
_snake_case : Tuple = {
"""c_attn""": """att_proj""",
"""c_proj""": """out_proj""",
"""c_fc""": """in_proj""",
"""transformer.""": """""",
"""h.""": """layers.""",
"""ln_1""": """layernorm_1""",
"""ln_2""": """layernorm_2""",
"""ln_f""": """layernorm_final""",
"""wpe""": """position_embeds_layer""",
"""wte""": """input_embeds_layer""",
}
_snake_case : Optional[Any] = {
"""text_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text.pt""",
},
"""coarse_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse.pt""",
},
"""fine_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine.pt""",
},
"""text""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text_2.pt""",
},
"""coarse""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse_2.pt""",
},
"""fine""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine_2.pt""",
},
}
_snake_case : Union[str, Any] = os.path.dirname(os.path.abspath(__file__))
_snake_case : List[Any] = os.path.join(os.path.expanduser("""~"""), """.cache""")
_snake_case : str = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _a ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Any=False , _SCREAMING_SNAKE_CASE : Tuple="text" ):
if model_type == "text":
_SCREAMING_SNAKE_CASE = BarkSemanticModel
_SCREAMING_SNAKE_CASE = BarkSemanticConfig
_SCREAMING_SNAKE_CASE = BarkSemanticGenerationConfig
elif model_type == "coarse":
_SCREAMING_SNAKE_CASE = BarkCoarseModel
_SCREAMING_SNAKE_CASE = BarkCoarseConfig
_SCREAMING_SNAKE_CASE = BarkCoarseGenerationConfig
elif model_type == "fine":
_SCREAMING_SNAKE_CASE = BarkFineModel
_SCREAMING_SNAKE_CASE = BarkFineConfig
_SCREAMING_SNAKE_CASE = BarkFineGenerationConfig
else:
raise NotImplementedError()
_SCREAMING_SNAKE_CASE = F'{model_type}_small' if use_small else model_type
_SCREAMING_SNAKE_CASE = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(_SCREAMING_SNAKE_CASE ):
logger.info(F'{model_type} model not found, downloading into `{CACHE_DIR}`.' )
_download(model_info["repo_id"] , model_info["file_name"] )
_SCREAMING_SNAKE_CASE = torch.load(_SCREAMING_SNAKE_CASE , map_location=_SCREAMING_SNAKE_CASE )
# this is a hack
_SCREAMING_SNAKE_CASE = checkpoint["model_args"]
if "input_vocab_size" not in model_args:
_SCREAMING_SNAKE_CASE = model_args["vocab_size"]
_SCREAMING_SNAKE_CASE = model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
_SCREAMING_SNAKE_CASE = model_args.pop("n_head" )
_SCREAMING_SNAKE_CASE = model_args.pop("n_embd" )
_SCREAMING_SNAKE_CASE = model_args.pop("n_layer" )
_SCREAMING_SNAKE_CASE = ConfigClass(**checkpoint["model_args"] )
_SCREAMING_SNAKE_CASE = ModelClass(config=_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = GenerationConfigClass()
_SCREAMING_SNAKE_CASE = model_generation_config
_SCREAMING_SNAKE_CASE = checkpoint["model"]
# fixup checkpoint
_SCREAMING_SNAKE_CASE = "_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(_SCREAMING_SNAKE_CASE ):
# replace part of the key with corresponding layer name in HF implementation
_SCREAMING_SNAKE_CASE = k[len(_SCREAMING_SNAKE_CASE ) :]
for old_layer_name in new_layer_name_dict:
_SCREAMING_SNAKE_CASE = new_k.replace(_SCREAMING_SNAKE_CASE , new_layer_name_dict[old_layer_name] )
_SCREAMING_SNAKE_CASE = state_dict.pop(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = set(state_dict.keys() ) - set(model.state_dict().keys() )
_SCREAMING_SNAKE_CASE = {k for k in extra_keys if not k.endswith(".attn.bias" )}
_SCREAMING_SNAKE_CASE = set(model.state_dict().keys() ) - set(state_dict.keys() )
_SCREAMING_SNAKE_CASE = {k for k in missing_keys if not k.endswith(".attn.bias" )}
if len(_SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(F'extra keys found: {extra_keys}' )
if len(_SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(F'missing keys: {missing_keys}' )
model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = model.num_parameters(exclude_embeddings=_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = checkpoint["best_val_loss"].item()
logger.info(F'model loaded: {round(n_params/1e6 , 1 )}M params, {round(_SCREAMING_SNAKE_CASE , 3 )} loss' )
model.eval()
model.to(_SCREAMING_SNAKE_CASE )
del checkpoint, state_dict
return model
def _a ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any]=False , _SCREAMING_SNAKE_CASE : List[Any]="text" ):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
_SCREAMING_SNAKE_CASE = "cpu" # do conversion on cpu
_SCREAMING_SNAKE_CASE = _get_ckpt_path(_SCREAMING_SNAKE_CASE , use_small=_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = _load_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , model_type=_SCREAMING_SNAKE_CASE , use_small=_SCREAMING_SNAKE_CASE )
# load bark initial model
_SCREAMING_SNAKE_CASE = _bark_load_model(_SCREAMING_SNAKE_CASE , "cpu" , model_type=_SCREAMING_SNAKE_CASE , use_small=_SCREAMING_SNAKE_CASE )
if model_type == "text":
_SCREAMING_SNAKE_CASE = bark_model["model"]
if model.num_parameters(exclude_embeddings=_SCREAMING_SNAKE_CASE ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
_SCREAMING_SNAKE_CASE = 5
_SCREAMING_SNAKE_CASE = 10
if model_type in ["text", "coarse"]:
_SCREAMING_SNAKE_CASE = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
_SCREAMING_SNAKE_CASE = bark_model(_SCREAMING_SNAKE_CASE )[0]
_SCREAMING_SNAKE_CASE = model(_SCREAMING_SNAKE_CASE )
# take last logits
_SCREAMING_SNAKE_CASE = output_new_model_total.logits[:, [-1], :]
else:
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = 8
_SCREAMING_SNAKE_CASE = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
_SCREAMING_SNAKE_CASE = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = bark_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("initial and new outputs are not equal" )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , ):
_SCREAMING_SNAKE_CASE = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = BarkSemanticConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , "config.json" ) )
_SCREAMING_SNAKE_CASE = BarkCoarseConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , "config.json" ) )
_SCREAMING_SNAKE_CASE = BarkFineConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , "config.json" ) )
_SCREAMING_SNAKE_CASE = EncodecConfig.from_pretrained("facebook/encodec_24khz" )
_SCREAMING_SNAKE_CASE = BarkSemanticModel.from_pretrained(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = BarkCoarseModel.from_pretrained(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = BarkFineModel.from_pretrained(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = EncodecModel.from_pretrained("facebook/encodec_24khz" )
_SCREAMING_SNAKE_CASE = BarkConfig.from_sub_model_configs(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
_SCREAMING_SNAKE_CASE = BarkModel(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = semantic
_SCREAMING_SNAKE_CASE = coarseAcoustic
_SCREAMING_SNAKE_CASE = fineAcoustic
_SCREAMING_SNAKE_CASE = codec
_SCREAMING_SNAKE_CASE = bark_generation_config
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
bark.save_pretrained(_SCREAMING_SNAKE_CASE , repo_id=_SCREAMING_SNAKE_CASE , push_to_hub=_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
_snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
_snake_case : Tuple = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small) | 493 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
a : Optional[int] = KandinskyImgaImgPipeline
a : Union[str, Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
a : Union[str, Any] = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
a : int = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a : Tuple = False
@property
def lowercase ( self ):
return 32
@property
def lowercase ( self ):
return 32
@property
def lowercase ( self ):
return self.time_input_dim
@property
def lowercase ( self ):
return self.time_input_dim * 4
@property
def lowercase ( self ):
return 100
@property
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def lowercase ( self ):
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
_SCREAMING_SNAKE_CASE = MultilingualCLIP(UpperCamelCase )
_SCREAMING_SNAKE_CASE = text_encoder.eval()
return text_encoder
@property
def lowercase ( self ):
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
_SCREAMING_SNAKE_CASE = UNetaDConditionModel(**UpperCamelCase )
return model
@property
def lowercase ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase ( self ):
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = self.dummy_text_encoder
_SCREAMING_SNAKE_CASE = self.dummy_tokenizer
_SCREAMING_SNAKE_CASE = self.dummy_unet
_SCREAMING_SNAKE_CASE = self.dummy_movq
_SCREAMING_SNAKE_CASE = {
"num_train_timesteps": 1_000,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
_SCREAMING_SNAKE_CASE = DDIMScheduler(**UpperCamelCase )
_SCREAMING_SNAKE_CASE = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowercase ( self , UpperCamelCase , UpperCamelCase=0 ):
_SCREAMING_SNAKE_CASE = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
_SCREAMING_SNAKE_CASE = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCamelCase )
# create init_image
_SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
_SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _SCREAMING_SNAKE_CASE = Image.fromarray(np.uint8(UpperCamelCase)).convert("RGB").resize((256, 256))
if str(UpperCamelCase ).startswith("mps" ):
_SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCamelCase )
else:
_SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
_SCREAMING_SNAKE_CASE = {
"prompt": "horse",
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = "cpu"
_SCREAMING_SNAKE_CASE = self.get_dummy_components()
_SCREAMING_SNAKE_CASE = self.pipeline_class(**UpperCamelCase )
_SCREAMING_SNAKE_CASE = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(UpperCamelCase ) )
_SCREAMING_SNAKE_CASE = output.images
_SCREAMING_SNAKE_CASE = pipe(
**self.get_dummy_inputs(UpperCamelCase ) , return_dict=UpperCamelCase , )[0]
_SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE = np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_img2img_frog.npy" )
_SCREAMING_SNAKE_CASE = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
_SCREAMING_SNAKE_CASE = "A red cartoon frog, 4k"
_SCREAMING_SNAKE_CASE = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase )
_SCREAMING_SNAKE_CASE = KandinskyImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa )
_SCREAMING_SNAKE_CASE = pipeline.to(UpperCamelCase )
pipeline.set_progress_bar_config(disable=UpperCamelCase )
_SCREAMING_SNAKE_CASE = torch.Generator(device="cpu" ).manual_seed(0 )
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = pipe_prior(
UpperCamelCase , generator=UpperCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
_SCREAMING_SNAKE_CASE = pipeline(
UpperCamelCase , image=UpperCamelCase , image_embeds=UpperCamelCase , negative_image_embeds=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
_SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase ) | 493 | 1 |
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """
    Solves the system of linear equations
        a1*x + b1*y = c1
        a2*x + b2*y = c2
    using Cramer's rule.
    """
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
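

# Example (added for illustration): the system x + 2y = 3, 2x + y = 3 has the
# unique solution x = 1, y = 1.
if __name__ == "__main__":
    print(cramers_rule_2x2([1, 2, 3], [2, 1, 3]))  # -> (1.0, 1.0)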
| 558 | import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2
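
# Quick sanity check (added): get_mid((0, 0), (4, 2)) returns (2.0, 1.0),
# the arithmetic mean of the two coordinates.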
def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Recursively draw the Sierpinski triangle down to the given depth."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 558 | 1 |
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
    def test_tokenizer_integration(self):
# fmt: off
__SCREAMING_SNAKE_CASE = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
| 710 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 553 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
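
# Note (added for clarity): the _LazyModule assignment above replaces this
# module object with a lazy proxy, so the heavy torch/vision imports listed in
# _import_structure are only executed on first attribute access.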
| 159 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(taken: int = 20) -> str:
    """
    Returns the expected number of distinct colours among `taken` balls drawn
    without replacement from 70 balls in 7 colours (10 balls per colour).
    """
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
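
# Sanity note (added): by linearity of expectation, each of the 7 colours is
# present with probability 1 - C(60, 20) / C(70, 20), so
# solution(20) == "6.818741802".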
| 395 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, bypass_transformer=False, special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
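
# Illustrative usage (added): a reduced configuration for quick experiments;
# the sizes below are arbitrary examples, not recommended defaults.
#
#   config = VisualBertConfig(hidden_size=256, num_hidden_layers=4, visual_embedding_dim=128)
#   config.num_attention_heads  # -> 12 (unchanged default)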
| 616 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=64, patch_size=1, num_channels=3, embed_dim=180, depths=[6, 6, 6, 6, 6, 6], num_heads=[6, 6, 6, 6, 6, 6], window_size=8, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, upscale=2, img_range=1.0, resi_connection="1conv", upsampler="pixelshuffle", **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
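
# Illustrative usage (added): upscale controls the super-resolution factor,
# e.g. a 4x model could be configured as below (values are examples only).
#
#   config = Swin2SRConfig(upscale=4)
#   model = Swin2SRForImageSuperResolution(config)  # model class from transformers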
| 616 | 1 |
import re
def dna(dna: str) -> str:
    """
    Returns the complementary strand of the given DNA sequence.

    >>> dna("GCTA")
    'CGAT'
    >>> dna("ATGC")
    'TACG'
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
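
# Illustrative usage (added): the complement of "ATCG" is "TAGC".
if __name__ == "__main__":
    print(dna("ATCG"))  # -> TAGC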
| 233 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
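
# Index arithmetic sanity check (added): for position 3 the parent lives at
# (3 - 1) // 2 == 1 and the children at 7 and 8, i.e. the standard
# array-backed binary-heap layout.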
class MinPriorityQueue(Generic[T]):
    """Min-heap priority queue with position tracking to support update_key."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement)
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement)
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given heap positions and update the position map
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class lowercase ( Generic[T] ):
def __init__( self ):
"""simple docstring"""
lowerCAmelCase__ : dict[T, dict[T, int]] = {}
lowerCAmelCase__ : int = 0
def __repr__( self ):
"""simple docstring"""
return str(self.connections )
def __len__( self ):
"""simple docstring"""
return self.nodes
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
if node not in self.connections:
lowerCAmelCase__ : Union[str, Any] = {}
self.nodes += 1
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
self.add_node(SCREAMING_SNAKE_CASE__ )
self.add_node(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ : Tuple = weight
lowerCAmelCase__ : Tuple = weight
def _a ( __UpperCamelCase : GraphUndirectedWeighted[T] ,):
lowerCAmelCase__ : dict[T, int] = {node: maxsize for node in graph.connections}
lowerCAmelCase__ : dict[T, T | None] = {node: None for node in graph.connections}
lowerCAmelCase__ : MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(__UpperCamelCase ,__UpperCamelCase )
if priority_queue.is_empty():
return dist, parent
# initialization
lowerCAmelCase__ : List[Any] = priority_queue.extract_min()
lowerCAmelCase__ : str = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowerCAmelCase__ : List[str] = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__UpperCamelCase ,dist[neighbour] )
lowerCAmelCase__ : Optional[Any] = node
# running prim's algorithm
while not priority_queue.is_empty():
lowerCAmelCase__ : List[str] = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowerCAmelCase__ : Optional[Any] = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__UpperCamelCase ,dist[neighbour] )
lowerCAmelCase__ : Optional[int] = node
return dist, parent
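
# A minimal usage sketch of the structures above:
#   graph = GraphUndirectedWeighted[str]()
#   graph.add_edge("a", "b", 3)
#   graph.add_edge("b", "c", 10)
#   graph.add_edge("a", "c", 15)
#   dist, parent = prims_algo(graph)
#   parent["b"] == "a" and parent["c"] == "b"  # MST picks edges a-b (3) and b-c (10)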
| 233 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = ReformerTokenizer
lowercase__ = ReformerTokenizerFast
lowercase__ = True
lowercase__ = False
lowercase__ = True
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
super().setUp()
_snake_case : Any = ReformerTokenizer(a_, keep_accents=a_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : str = """<s>"""
_snake_case : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ), a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ), a_ )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], """<unk>""" )
self.assertEqual(vocab_keys[1], """<s>""" )
self.assertEqual(vocab_keys[-1], """j""" )
self.assertEqual(len(a_ ), 1_000 )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1_000 )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_snake_case : Union[str, Any] = self.get_tokenizer()
_snake_case : Union[str, Any] = self.get_rust_tokenizer()
_snake_case : Tuple = """I was born in 92000, and this is falsé."""
_snake_case : List[Any] = tokenizer.tokenize(a_ )
_snake_case : Optional[int] = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
_snake_case : Optional[int] = tokenizer.encode(a_, add_special_tokens=a_ )
_snake_case : Dict = rust_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
_snake_case : Tuple = self.get_rust_tokenizer()
_snake_case : List[str] = tokenizer.encode(a_ )
_snake_case : List[str] = rust_tokenizer.encode(a_ )
self.assertListEqual(a_, a_ )
def UpperCamelCase_ ( self: Union[str, Any], a_: str=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_snake_case : int = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
# Simple input
_snake_case : List[Any] = """This is a simple input"""
_snake_case : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
_snake_case : Union[str, Any] = ("""This is a simple input""", """This is a pair""")
_snake_case : int = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" )
# Simple input
self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" )
# Simple input
self.assertRaises(
a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", )
# Pair input
self.assertRaises(a_, tokenizer_r.encode, a_, max_length=a_, padding="""max_length""" )
# Pair input
self.assertRaises(a_, tokenizer_r.encode_plus, a_, max_length=a_, padding="""max_length""" )
# Pair input
self.assertRaises(
a_, tokenizer_r.batch_encode_plus, a_, max_length=a_, padding="""max_length""", )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = ReformerTokenizer(a_, keep_accents=a_ )
_snake_case : List[str] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a_, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a_ ), [285, 46, 10, 170, 382], )
_snake_case : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a_, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
], )
_snake_case : List[Any] = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(
a_, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
_snake_case : List[str] = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
], )
@cached_property
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" )
@slow
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = """Hello World!"""
_snake_case : Tuple = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(a_, self.big_tokenizer.encode(a_ ) )
@slow
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
_snake_case : List[Any] = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(a_, self.big_tokenizer.encode(a_ ) )
@require_torch
@slow
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
_snake_case : Optional[int] = list(self.big_tokenizer.get_vocab().keys() )[:10]
_snake_case : Tuple = """ """.join(a_ )
_snake_case : Union[str, Any] = self.big_tokenizer.encode_plus(a_, return_tensors="""pt""" )
_snake_case : List[Any] = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="""pt""" )
_snake_case : List[str] = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
_snake_case : Optional[Any] = encoded_sequence["""input_ids"""].shape
_snake_case : Dict = ReformerModel(a_ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**a_ )
model(**a_ )
@slow
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
_snake_case : Dict = {"""input_ids""": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
_snake_case : Tuple = [
"""This is a very simple sentence.""",
"""The quick brown fox jumps over the lazy dog.""",
]
self.tokenizer_integration_test_util(
expected_encoding=a_, model_name="""google/reformer-crime-and-punishment""", revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""", padding=a_, sequences=a_, )
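
# For reference, the slow integration check above can be reproduced directly
# (assumes network access to the Hugging Face Hub):
#   from transformers import ReformerTokenizer
#   tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   tok.encode("Hello World!")  # -> [126, 32, 262, 152, 38, 72, 287]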
| 704 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def UpperCAmelCase__ (snake_case__ : Optional[int] ):
"""simple docstring"""
print("""Loading config file...""" )
def flatten_yaml_as_dict(snake_case__ : List[Any] , snake_case__ : Optional[Any]="" , snake_case__ : Tuple="." ):
_snake_case : Union[str, Any] = []
for k, v in d.items():
_snake_case : List[str] = parent_key + sep + k if parent_key else k
if isinstance(snake_case__ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(snake_case__ , snake_case__ , sep=snake_case__ ).items() )
else:
items.append((new_key, v) )
return dict(snake_case__ )
_snake_case : Dict = argparse.Namespace()
with open(snake_case__ , """r""" ) as yaml_file:
try:
_snake_case : List[Any] = yaml.load(snake_case__ , Loader=yaml.FullLoader )
_snake_case : Any = flatten_yaml_as_dict(snake_case__ )
for k, v in flat_cfg.items():
setattr(snake_case__ , snake_case__ , snake_case__ )
except yaml.YAMLError as exc:
logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case__ , str(snake_case__ ) ) )
return config
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
_snake_case : Dict = MobileViTVaConfig()
_snake_case : Optional[int] = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
_snake_case : Dict = 10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
_snake_case : Union[str, Any] = 3_84
else:
_snake_case : Optional[Any] = 2_56
_snake_case : str = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
_snake_case : str = 2_10_00
if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
_snake_case : Dict = 3_84
else:
_snake_case : Union[str, Any] = 2_56
_snake_case : Tuple = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
_snake_case : Tuple = 1_51
_snake_case : str = 5_12
_snake_case : List[Any] = """ade20k-id2label.json"""
_snake_case : Union[str, Any] = True
elif task_name.startswith("""voc_""" ):
_snake_case : List[Any] = 21
_snake_case : List[str] = 5_12
_snake_case : int = """pascal-voc-id2label.json"""
_snake_case : int = True
# orig_config
_snake_case : int = load_orig_config_file(snake_case__ )
assert getattr(snake_case__ , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
_snake_case : str = getattr(snake_case__ , """model.classification.mitv2.width_multiplier""" , 1.0 )
assert (
getattr(snake_case__ , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
_snake_case : int = getattr(snake_case__ , """model.classification.activation.name""" , """swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_snake_case : Tuple = getattr(snake_case__ , """model.segmentation.output_stride""" , 16 )
if "_deeplabv3" in task_name:
_snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
_snake_case : Tuple = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 )
_snake_case : Any = getattr(snake_case__ , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
# id2label
_snake_case : Union[str, Any] = """huggingface/label-files"""
_snake_case : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
_snake_case : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
_snake_case : Tuple = idalabel
_snake_case : Any = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
_snake_case : List[str] = dct.pop(snake_case__ )
_snake_case : List[Any] = val
def UpperCAmelCase__ (snake_case__ : Optional[Any] , snake_case__ : int=False ):
"""simple docstring"""
if base_model:
_snake_case : Any = """"""
else:
_snake_case : Union[str, Any] = """mobilevitv2."""
_snake_case : Dict = []
for k in state_dict.keys():
if k[:8] == "encoder.":
_snake_case : List[str] = k[8:]
else:
_snake_case : str = k
if ".block." in k:
_snake_case : Optional[int] = k_new.replace(""".block.""" , """.""" )
if ".conv." in k:
_snake_case : Union[str, Any] = k_new.replace(""".conv.""" , """.convolution.""" )
if ".norm." in k:
_snake_case : str = k_new.replace(""".norm.""" , """.normalization.""" )
if "conv_1." in k:
_snake_case : int = k_new.replace("""conv_1.""" , F"{model_prefix}conv_stem." )
for i in [1, 2]:
if F"layer_{i}." in k:
_snake_case : Tuple = k_new.replace(F"layer_{i}." , F"{model_prefix}encoder.layer.{i-1}.layer." )
if ".exp_1x1." in k:
_snake_case : Optional[Any] = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
if ".red_1x1." in k:
_snake_case : Optional[Any] = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
for i in [3, 4, 5]:
if F"layer_{i}.0." in k:
_snake_case : Tuple = k_new.replace(F"layer_{i}.0." , F"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
if F"layer_{i}.1.local_rep.0." in k:
_snake_case : Any = k_new.replace(F"layer_{i}.1.local_rep.0." , F"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
if F"layer_{i}.1.local_rep.1." in k:
_snake_case : str = k_new.replace(F"layer_{i}.1.local_rep.1." , F"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
for i in [3, 4, 5]:
if i == 3:
_snake_case : Optional[Any] = [0, 1]
elif i == 4:
_snake_case : Any = [0, 1, 2, 3]
elif i == 5:
_snake_case : List[Any] = [0, 1, 2]
for j in j_in:
if F"layer_{i}.1.global_rep.{j}." in k:
_snake_case : Any = k_new.replace(
F"layer_{i}.1.global_rep.{j}." , F"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
if F"layer_{i}.1.global_rep.{j+1}." in k:
_snake_case : List[Any] = k_new.replace(
F"layer_{i}.1.global_rep.{j+1}." , F"{model_prefix}encoder.layer.{i-1}.layernorm." )
if F"layer_{i}.1.conv_proj." in k:
_snake_case : Union[str, Any] = k_new.replace(F"layer_{i}.1.conv_proj." , F"{model_prefix}encoder.layer.{i-1}.conv_projection." )
if "pre_norm_attn.0." in k:
_snake_case : List[Any] = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
if "pre_norm_attn.1." in k:
_snake_case : Optional[int] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
if "pre_norm_ffn.0." in k:
_snake_case : List[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
if "pre_norm_ffn.1." in k:
_snake_case : Tuple = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
_snake_case : Any = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
if "classifier.1." in k:
_snake_case : List[str] = k_new.replace("""classifier.1.""" , """classifier.""" )
if "seg_head." in k:
_snake_case : str = k_new.replace("""seg_head.""" , """segmentation_head.""" )
if ".aspp_layer." in k:
_snake_case : Optional[int] = k_new.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in k:
_snake_case : int = k_new.replace(""".aspp_pool.""" , """.""" )
rename_keys.append((k, k_new) )
return rename_keys
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : List[str] = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head.""" ):
keys_to_ignore.append(snake_case__ )
for k in keys_to_ignore:
state_dict.pop(snake_case__ , snake_case__ )
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
_snake_case : Any = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : int = get_mobilevitva_config(snake_case__ , snake_case__ )
# load original state_dict
_snake_case : Optional[int] = torch.load(snake_case__ , map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
_snake_case : Any = MobileViTVaForSemanticSegmentation(snake_case__ ).eval()
_snake_case : List[Any] = False
else:
_snake_case : List[Any] = MobileViTVaForImageClassification(snake_case__ ).eval()
_snake_case : Optional[Any] = False
# remove and rename some keys of load the original model
_snake_case : Union[str, Any] = checkpoint
remove_unused_keys(snake_case__ )
_snake_case : List[str] = create_rename_keys(snake_case__ , base_model=snake_case__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
# load modified state_dict
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
_snake_case : Optional[int] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_snake_case : List[str] = image_processor(images=prepare_img() , return_tensors="""pt""" )
_snake_case : Optional[Any] = model(**snake_case__ )
# verify classification model
if task_name.startswith("""imagenet""" ):
_snake_case : List[str] = outputs.logits
_snake_case : Any = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
_snake_case : List[str] = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] )
assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"Saving model {task_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
A_ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
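
# Example invocation (the script file name and all paths are placeholders):
#   python convert_mobilevitv2_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path mobilevitv2-1.0.pt \
#       --orig_config_path mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256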
| 28 | 0 |
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater than the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
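
    # Quick demo: count occurrences of "abr" in "abracadabra" via the Z-array.
    print(find_pattern("abr", "abracadabra"))  # -> 2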
| 478 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
a_ :List[str] = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int, _snake_case : Union[str, Any], _snake_case : List[str]=7, _snake_case : int=3, _snake_case : List[Any]=1_8, _snake_case : List[str]=3_0, _snake_case : str=4_0_0, _snake_case : Optional[Any]=None, _snake_case : Dict=True, _snake_case : str=True, _snake_case : Union[str, Any]=None, ) ->str:
snake_case__ : int = size if size is not None else {'height': 2_0, 'width': 2_0}
snake_case__ : Optional[Any] = parent
snake_case__ : Optional[Any] = batch_size
snake_case__ : List[Any] = num_channels
snake_case__ : List[str] = image_size
snake_case__ : List[str] = min_resolution
snake_case__ : int = max_resolution
snake_case__ : Union[str, Any] = size
snake_case__ : Tuple = do_normalize
snake_case__ : List[str] = do_convert_rgb
snake_case__ : List[Any] = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
snake_case__ : Tuple = patch_size if patch_size is not None else {'height': 1_6, 'width': 1_6}
def lowercase_ ( self : str ) ->int:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def lowercase_ ( self : Optional[Any] ) ->str:
snake_case__ : List[Any] = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
snake_case__ : List[Any] = Image.open(requests.get(_snake_case, stream=_snake_case ).raw ).convert('RGB' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class snake_case__ ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = PixaStructImageProcessor if is_vision_available() else None
def lowercase_ ( self : str ) ->str:
snake_case__ : Optional[int] = PixaStructImageProcessingTester(self )
@property
def lowercase_ ( self : Union[str, Any] ) ->Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : str ) ->Union[str, Any]:
snake_case__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case, 'do_normalize' ) )
self.assertTrue(hasattr(_snake_case, 'do_convert_rgb' ) )
def lowercase_ ( self : str ) ->Union[str, Any]:
snake_case__ : List[str] = self.image_processor_tester.prepare_dummy_image()
snake_case__ : Tuple = self.image_processing_class(**self.image_processor_dict )
snake_case__ : Optional[int] = 2_0_4_8
snake_case__ : Optional[int] = image_processor(_snake_case, return_tensors='pt', max_patches=_snake_case )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0_6_0_6 ), atol=1e-3, rtol=1e-3 ) )
def lowercase_ ( self : Tuple ) ->Dict:
# Initialize image_processor
snake_case__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Tuple = prepare_image_inputs(self.image_processor_tester, equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case, Image.Image )
# Test not batched input
snake_case__ : Optional[Any] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case__ : Optional[Any] = image_processor(
image_inputs[0], return_tensors='pt', max_patches=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
snake_case__ : Any = image_processor(
_snake_case, return_tensors='pt', max_patches=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def lowercase_ ( self : List[str] ) ->Optional[Any]:
# Initialize image_processor
snake_case__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester, equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case, Image.Image )
# Test not batched input
snake_case__ : Optional[Any] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
snake_case__ : Union[str, Any] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_snake_case ):
snake_case__ : int = image_processor(
image_inputs[0], return_tensors='pt', max_patches=_snake_case ).flattened_patches
snake_case__ : Optional[Any] = 'Hello'
snake_case__ : Dict = image_processor(
image_inputs[0], return_tensors='pt', max_patches=_snake_case, header_text=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
snake_case__ : List[Any] = image_processor(
_snake_case, return_tensors='pt', max_patches=_snake_case, header_text=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def lowercase_ ( self : Any ) ->int:
# Initialize image_processor
snake_case__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Any = prepare_image_inputs(self.image_processor_tester, equal_resolution=_snake_case, numpify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case, np.ndarray )
snake_case__ : Union[str, Any] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case__ : List[str] = image_processor(
image_inputs[0], return_tensors='pt', max_patches=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
snake_case__ : Dict = image_processor(
_snake_case, return_tensors='pt', max_patches=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def lowercase_ ( self : List[Any] ) ->List[Any]:
# Initialize image_processor
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=_snake_case, torchify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case, torch.Tensor )
# Test not batched input
snake_case__ : Any = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case__ : Optional[Any] = image_processor(
image_inputs[0], return_tensors='pt', max_patches=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
snake_case__ : int = image_processor(
_snake_case, return_tensors='pt', max_patches=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class snake_case__ ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = PixaStructImageProcessor if is_vision_available() else None
def lowercase_ ( self : Any ) ->Union[str, Any]:
snake_case__ : Union[str, Any] = PixaStructImageProcessingTester(self, num_channels=4 )
snake_case__ : int = 3
@property
def lowercase_ ( self : Optional[Any] ) ->List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : Optional[int] ) ->Optional[int]:
snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case, 'do_normalize' ) )
self.assertTrue(hasattr(_snake_case, 'do_convert_rgb' ) )
def lowercase_ ( self : Optional[int] ) ->str:
# Initialize image_processor
snake_case__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester, equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case, Image.Image )
# Test not batched input
snake_case__ : List[Any] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case__ : Any = image_processor(
image_inputs[0], return_tensors='pt', max_patches=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
snake_case__ : Dict = image_processor(
_snake_case, return_tensors='pt', max_patches=_snake_case ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
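
# Outside the test harness, the shape contract asserted above reads roughly as
# follows (a sketch; `Pix2StructImageProcessor` defaults to 16x16 patches):
#   import numpy as np
#   from transformers import Pix2StructImageProcessor
#   processor = Pix2StructImageProcessor()
#   image = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)
#   patches = processor(images=image, return_tensors="pt", max_patches=1024).flattened_patches
#   patches.shape  # (1, 1024, 16 * 16 * 3 + 2); the extra 2 columns hold row/col patch ids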
| 478 | 1 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()

DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
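
# A minimal usage sketch, assuming this generated module is importable as
# `sentencepiece_model_pb2` (the module path is hypothetical):
#   from sentencepiece_model_pb2 import ModelProto
#   m = ModelProto()
#   with open("spiece.model", "rb") as f:
#       m.ParseFromString(f.read())
#   print(m.trainer_spec.vocab_size, len(m.pieces))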
| 123 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Compute the Euclidean distance between two vectors using numpy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Compute the Euclidean distance between two vectors in pure Python."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
    def benchmark() -> None:
        """Time both implementations on the same inputs."""
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' ,number=1_0_0_0_0 ,globals=globals() ,) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' ,number=1_0_0_0_0 ,globals=globals() ,) )
benchmark()
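
    # Sanity check: both implementations agree on the classic 3-4-5 triangle.
    assert euclidean_distance([0, 0], [3, 4]) == euclidean_distance_no_np([0, 0], [3, 4]) == 5.0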
| 123 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''biogpt'''
def __init__( self , lowerCamelCase_=4_2_3_8_4 , lowerCamelCase_=1_0_2_4 , lowerCamelCase_=2_4 , lowerCamelCase_=1_6 , lowerCamelCase_=4_0_9_6 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=1_0_2_4 , lowerCamelCase_=0.02 , lowerCamelCase_=1e-12 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , lowerCamelCase_=1 , lowerCamelCase_=0 , lowerCamelCase_=2 , **lowerCamelCase_ , ) -> Union[str, Any]:
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = scale_embedding
UpperCamelCase = use_cache
UpperCamelCase = layerdrop
UpperCamelCase = activation_dropout
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_) | 34 |
"""Quine-McCluskey simplification of boolean functions."""
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # a column covered by exactly one implicant marks that implicant as essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily pick the implicant covering the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
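
# Non-interactive examples of the helpers above (integer minterms shown;
# note that main() reads floats, which changes the produced string format):
#   decimal_to_binary(3, [5]) -> ["101"]
#   compare_string("0010", "0110") -> "0_10"   (terms differing in one bit merge)
#   compare_string("0110", "1101") -> False    (more than one differing bit)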
| 467 | 0 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir: Path, src_lang: str, tgt_lang: str) -> None:
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
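
# Running this script generates, relative to the repo root:
#   model_cards/facebook/wmt19-ru-en/README.md
#   model_cards/facebook/wmt19-en-ru/README.md
#   model_cards/facebook/wmt19-en-de/README.md
#   model_cards/facebook/wmt19-de-en/README.md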
| 103 |
def solution(n: int = 4_000_000) -> int:
    """Sum the even-valued Fibonacci terms that do not exceed ``n``."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 103 | 1 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCAmelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
A_ : Union[str, Any] = StableUnCLIPPipeline
A_ : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
A_ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
A_ : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
A_ : str = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
A_ : Optional[Any] = False
def __UpperCamelCase ( self : Tuple ) -> str:
A = 32
A = embedder_hidden_size
# prior components
torch.manual_seed(0 )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
A = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__UpperCamelCase , projection_dim=__UpperCamelCase , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
A = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__UpperCamelCase , num_layers=1 , )
torch.manual_seed(0 )
A = DDPMScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1_000 , clip_sample=__UpperCamelCase , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )
# regular denoising components
torch.manual_seed(0 )
A = StableUnCLIPImageNormalizer(embedding_dim=__UpperCamelCase )
A = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
A = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__UpperCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
A = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__UpperCamelCase , layers_per_block=1 , upcast_attention=__UpperCamelCase , use_linear_projection=__UpperCamelCase , )
torch.manual_seed(0 )
A = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='v_prediction' , set_alpha_to_one=__UpperCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
A = AutoencoderKL()
A = {
# prior components
'prior_tokenizer': prior_tokenizer,
'prior_text_encoder': prior_text_encoder,
'prior': prior,
'prior_scheduler': prior_scheduler,
# image noising components
'image_normalizer': image_normalizer,
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder,
'unet': unet,
'scheduler': scheduler,
'vae': vae,
}
return components
def __UpperCamelCase ( self : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str=0 ) -> Optional[int]:
if str(__UpperCamelCase ).startswith('mps' ):
A = torch.manual_seed(__UpperCamelCase )
else:
A = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'prior_num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
A = torch_device == 'cpu'
self._test_attention_slicing_forward_pass(test_max_difference=__UpperCamelCase )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
A = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=__UpperCamelCase )
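    # Added sketch (hypothetical; assumes the usual PipelineTesterMixin naming of
    # pipeline_class / get_dummy_components / get_dummy_inputs rather than the
    # placeholder method names used above): how the dummy pieces combine in a fast test.
    def _example_smoke_test(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs('cpu')
        images = pipe(**inputs).images
        assert images.ndim == 4  # numpy output: (batch, height, width, channels)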
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
A = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A = torch.Generator(device='cpu' ).manual_seed(0 )
A = pipe('anime turtle' , generator=__UpperCamelCase , output_type='np' )
A = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__UpperCamelCase , __UpperCamelCase )
def __UpperCamelCase ( self : List[str] ) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
A = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A = pipe(
'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
A = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9 | 106 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
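# Note (added): with this lazy pattern the package import stays cheap; the heavy torch
# submodules listed in _import_structure are only imported when one of their attributes
# is first accessed through the _LazyModule proxy.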
| 482 | 0 |
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'prophetnet.tokenizer'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'microsoft/xprophetnet-large-wiki100-cased': (
            'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'microsoft/xprophetnet-large-wiki100-cased': 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
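# Example (added, illustrative): a vocab file containing "hello\nworld\n" loads as
# OrderedDict([("hello", 0), ("world", 1)]).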
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, bos_token="[SEP]", eos_token="[SEP]", sep_token="[SEP]", unk_token="[UNK]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                """You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
                """ pip install sentencepiece""")
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"""[PAD]""": 0, """[CLS]""": 1, """[SEP]""": 2, """[UNK]""": 3, """[MASK]""": 4}
        for i in range(10):
            tok = F'''[unused{i}]'''
            self.fairseq_tokens_to_ids[tok] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                """You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
                """ pip install sentencepiece""")
            raise
        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs"""):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.sp_model) + self.fairseq_offset
    def get_vocab(self):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str):
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
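    # Illustration (added, values per the alignment table in __init__): the spm id for
    # "," is 3, so its tokenizer id is 3 + fairseq_offset (12) = 15; an spm id of 0
    # means "unknown" and is mapped to unk_token_id instead.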
    def _convert_id_to_token(self, index):
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = """""".join(tokens).replace(SPIECE_UNDERLINE, """ """).strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
| 545 |
"""simple docstring"""
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
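# Worked example (added): for divisor 7, the repunits R(1..6) mod 7 are 1, 4, 6, 5, 2, 0,
# so least_divisible_repunit(7) == 6; indeed R(6) = 111111 = 7 * 15873. solution() scans
# only odd divisors not ending in 0 or 5, since those can never divide a repunit.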
if __name__ == "__main__":
print(f"""{solution() = }""")
| 545 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = '''gptsan-japanese'''
    keys_to_ignore_at_inference = [
        '''past_key_values''',
    ]
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__(self, vocab_size=36_000, max_position_embeddings=1_280, d_model=1_024, d_ff=8_192, d_ext=4_096, d_spout=128, num_switch_layers=10, num_ext_layers=0, num_heads=16, num_experts=16, expert_capacity=128, dropout_rate=0.0, layer_norm_epsilon=1e-5, router_bias=False, router_jitter_noise=0.0, router_dtype="float32", router_ignore_padding_tokens=False, output_hidden_states=False, output_attentions=False, initializer_factor=0.002, output_router_logits=False, use_cache=True, separator_token_id=35_998, pad_token_id=35_995, eos_token_id=35_999, **kwargs):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        super().__init__(
            separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs)
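    # Usage sketch (added): thanks to attribute_map above, the generic config names alias
    # the GPTSAN-specific ones, e.g. GPTSanJapaneseConfig(d_model=512).hidden_size == 512.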
| 595 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        """The preprocess method is deprecated and will be removed in a future version. Please"""
        """ use VaeImageProcessor.preprocess instead""", FutureWarning, )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["""lanczos"""]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("""L""").resize((w, h), resample=PIL_INTERPOLATION["""nearest"""]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
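# Note (added): the 0.5 threshold above binarizes grayscale masks into hard keep/inpaint
# maps, e.g. scaled pixel values [0.2, 0.5, 0.9] become [0.0, 1.0, 1.0].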
class RePaintPipeline(DiffusionPipeline):
    unet: UNetaDModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, image: Union[torch.Tensor, PIL.Image.Image], mask_image: Union[torch.Tensor, PIL.Image.Image], num_inference_steps: int = 250, eta: float = 0.0, jump_length: int = 10, jump_n_sample: int = 10, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator)}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''')
        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
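# Usage sketch (added; the checkpoint id is illustrative, not part of this file):
# scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
# pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
# inpainted = pipe(image=original, mask_image=mask).images[0]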
| 272 | 0 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    args = parser.parse_args()
    config_parameters_to_change = {
"""image_size""": """sample_size""",
"""num_res_blocks""": """layers_per_block""",
"""block_channels""": """block_out_channels""",
"""down_blocks""": """down_block_types""",
"""up_blocks""": """up_block_types""",
"""downscale_freq_shift""": """freq_shift""",
"""resnet_num_groups""": """norm_num_groups""",
"""resnet_act_fn""": """act_fn""",
"""resnet_eps""": """norm_eps""",
"""num_head_channels""": """attention_head_dim""",
}
    key_parameters_to_change = {
"""time_steps""": """time_proj""",
"""mid""": """mid_block""",
"""downsample_blocks""": """down_blocks""",
"""upsample_blocks""": """up_blocks""",
}
    subfolder = """""" if has_file(args.repo_path, """config.json""") else """unet"""
with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
        text = reader.read()
        config = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, """config.json"""):
            model = UNetaDModel(**config)
else:
            class_name = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
            model = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
                config[value] = config[key]
del config[key]
        config["""down_block_types"""] = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
        config["""up_block_types"""] = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]
if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
        new_state_dict = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
continue
            has_changed = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(""".""")[0] == key:
                    new_state_dict[""".""".join([new_key] + param_key.split(""".""")[1:])] = param_value
                    has_changed = True
if not has_changed:
                new_state_dict[param_key] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
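# Example of the key renaming above (added, illustrative): a checkpoint parameter named
# "downsample_blocks.0.resnets.0.conv1.weight" is stored in the converted state dict as
# "down_blocks.0.resnets.0.conv1.weight", per key_parameters_to_change.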
| 714 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 150 | 0 |
'''simple docstring'''
def cramers_rule_2x2(equation1: list, equation2: list) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError('''Please enter a valid equation.''')
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('''Both a & b of two equations can\'t be zero.''')
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('''Infinite solutions. (Consistent system)''')
        else:
            raise ValueError('''No solution. (Inconsistent system)''')
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
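# Worked example (added): x + 2y = 3 and 2x + y = 3 give determinant = -3 and
# determinant_x = determinant_y = -3, so
# cramers_rule_2x2([1, 2, 3], [2, 1, 3]) == (1.0, 1.0).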
| 546 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ])

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-4
| 546 | 1 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
    def _info(self):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
    def _compute(self, predictions, references, char_order: int = CHRF.CHAR_ORDER, word_order: int = CHRF.WORD_ORDER, beta: int = CHRF.BETA, lowercase: bool = False, whitespace: bool = False, eps_smoothing: bool = False):
        '''simple docstring'''
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 647 | import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '''▁'''

VOCAB_FILES_NAMES = {'''vocab_file''': '''prophetnet.tokenizer'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''microsoft/xprophetnet-large-wiki100-cased''': (
            '''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'''
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    '''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''microsoft/xprophetnet-large-wiki100-cased''': 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : str="[SEP]" , _UpperCamelCase : str="[SEP]" , _UpperCamelCase : Dict="[SEP]" , _UpperCamelCase : Tuple="[UNK]" , _UpperCamelCase : Dict="[PAD]" , _UpperCamelCase : Any="[CLS]" , _UpperCamelCase : Optional[Any]="[MASK]" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Dict , ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , sep_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
SCREAMING_SNAKE_CASE = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
SCREAMING_SNAKE_CASE = F"[unused{i}]"
SCREAMING_SNAKE_CASE = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE = 12
SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(_UpperCamelCase )
def __getstate__( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self : List[Any] , _UpperCamelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __snake_case( self : Dict , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is None:
return ([0] * len(_UpperCamelCase )) + [1]
return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def __snake_case( self : str , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __snake_case( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset
def __snake_case( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __snake_case( self : Union[str, Any] , _UpperCamelCase : str ) -> str:
'''simple docstring'''
return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
def __snake_case( self : Optional[Any] , _UpperCamelCase : List[Any] ) -> List[str]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE = self.sp_model.PieceToId(_UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __snake_case( self : str , _UpperCamelCase : str ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __snake_case( self : List[str] , _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = "".join(_UpperCamelCase ).replace(_UpperCamelCase , " " ).strip()
return out_string
def __snake_case( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_UpperCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
_UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , "wb" ) as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
def __snake_case( self : Optional[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
| 647 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    """simple docstring"""

    model_type = '''sew'''
def __init__( self , UpperCamelCase__=32 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3_072 , UpperCamelCase__=2 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=0.0 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-5 , UpperCamelCase__="group" , UpperCamelCase__="gelu" , UpperCamelCase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCamelCase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCamelCase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCamelCase__=False , UpperCamelCase__=128 , UpperCamelCase__=16 , UpperCamelCase__=True , UpperCamelCase__=0.05 , UpperCamelCase__=10 , UpperCamelCase__=2 , UpperCamelCase__=0.0 , UpperCamelCase__=10 , UpperCamelCase__=0 , UpperCamelCase__="mean" , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=256 , UpperCamelCase__=0 , UpperCamelCase__=1 , UpperCamelCase__=2 , **UpperCamelCase__ , ):
"""simple docstring"""
super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ )
a_ = hidden_size
a_ = feat_extract_norm
a_ = feat_extract_activation
a_ = list(A_ )
a_ = list(A_ )
a_ = list(A_ )
a_ = conv_bias
a_ = num_conv_pos_embeddings
a_ = num_conv_pos_embedding_groups
a_ = len(self.conv_dim )
a_ = num_hidden_layers
a_ = intermediate_size
a_ = squeeze_factor
a_ = hidden_act
a_ = num_attention_heads
a_ = hidden_dropout
a_ = attention_dropout
a_ = activation_dropout
a_ = feat_proj_dropout
a_ = final_dropout
a_ = layerdrop
a_ = layer_norm_eps
a_ = initializer_range
a_ = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect.'
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'
f'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a_ = apply_spec_augment
a_ = mask_time_prob
a_ = mask_time_length
a_ = mask_time_min_masks
a_ = mask_feature_prob
a_ = mask_feature_length
a_ = mask_feature_min_masks
# ctc loss
a_ = ctc_loss_reduction
a_ = ctc_zero_infinity
# sequence classification
a_ = use_weighted_layer_sum
a_ = classifier_proj_size
@property
    def inputs_to_logits_ratio(self):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
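    # Worked example (added): with the default conv stride (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
    # the property above returns 5 * 2**6 = 320, i.e. one output frame per 320 input samples.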
| 536 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = BertTokenizer
UpperCamelCase = BertTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = filter_non_english
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def a__ ( self : Tuple , A_ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
return input_text, output_text
def a__ ( self : Any ) -> Tuple:
"""simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = 'UNwant\u00E9d,running'
lowerCamelCase_ = tokenizer.tokenize(A_ )
lowerCamelCase_ = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = tokenizer.encode(A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
# With lower casing
lowerCamelCase_ = self.get_tokenizer(do_lower_case=A_ )
lowerCamelCase_ = self.get_rust_tokenizer(do_lower_case=A_ )
lowerCamelCase_ = 'UNwant\u00E9d,running'
lowerCamelCase_ = tokenizer.tokenize(A_ )
lowerCamelCase_ = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = tokenizer.encode(A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
        tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def a__ ( self : Dict ) -> int:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : str ) -> int:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : int ) -> Any:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
        tokenizer = BasicTokenizer()
        text = 'a\n\'ll !!to?\'d of, can\'t.'
        expected = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
        self.assertListEqual(tokenizer.tokenize(text ) , expected )
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def a__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def a__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def a__ ( self : int ) -> Optional[int]:
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def a__ ( self : Any ) -> int:
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('bert-base-uncased' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
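        # Illustration (added): for bert-base-uncased, id 101 is [CLS] and 102 is [SEP],
        # so a single sequence encodes as [CLS] A [SEP] and a pair as [CLS] A [SEP] B [SEP].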
def a__ ( self : str ) -> str:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
lowerCamelCase_ = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
lowerCamelCase_ = tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
    def test_change_tokenize_chinese_chars(self):
        """simple docstring"""
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"""##{token}""" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
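    # For reference (illustrative note, not part of the original test file): with
    # bert-base-uncased, [CLS] is id 101 and [SEP] is id 102, so the sequence-builder
    # assertions above check the layouts
    #   single sequence: [CLS] text [SEP]              -> [101] + text + [102]
    #   sentence pair:   [CLS] text [SEP] text_a [SEP] -> [101] + text + [102] + text_a + [102]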
| 70 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
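# A brief usage sketch (added for illustration; not part of the original module).
# It assumes network access to the Hub; the checkpoint name comes from
# PRETRAINED_VOCAB_FILES_MAP above.
if __name__ == "__main__":
    tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
    encoded = tokenizer("Hello world")
    print(encoded["input_ids"])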
| 321 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs(
    context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl", ):
    """simple docstring"""
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpta("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt", ):
    """simple docstring"""
    set_seed(42)
    # Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs, batch_size=secondary_learner_batch_size, eval_freq=100, igf_model_path=igf_model_path, )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpta, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", ):
    """simple docstring"""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataloader)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0) )[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
"""simple docstring"""
    parser = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
# Required parameters
parser.add_argument(
"""--data_dir""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="""The input data dir. Should contain data files for WikiText.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--data_file""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help=(
"""A jbl file containing tokenized data which can be split as objective dataset, """
"""train_dataset and test_dataset."""
) , )
parser.add_argument(
"""--igf_data_file""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , )
parser.add_argument(
"""--output_dir""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="""The output directory where the final fine-tuned model is stored.""" , )
parser.add_argument(
"""--tokenizer_name""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument("""--seed""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help="""A seed for reproducible training.""" )
parser.add_argument(
"""--context_len""" , default=32 , type=UpperCAmelCase_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--size_objective_set""" , default=100 , type=UpperCAmelCase_ , help="""number of articles that are long enough to be used as our objective set""" , )
parser.add_argument(
"""--eval_freq""" , default=100 , type=UpperCAmelCase_ , help="""secondary model evaluation is triggered at eval_freq""" )
parser.add_argument("""--max_steps""" , default=1000 , type=UpperCAmelCase_ , help="""To calculate training epochs""" )
parser.add_argument(
"""--secondary_learner_batch_size""" , default=128 , type=UpperCAmelCase_ , help="""batch size of training data for secondary learner""" , )
parser.add_argument(
"""--batch_size""" , default=16 , type=UpperCAmelCase_ , help="""batch size of training data of language model(gpt2) """ )
parser.add_argument(
"""--eval_interval""" , default=10 , type=UpperCAmelCase_ , help=(
"""decay the selectivity of our secondary learner filter from"""
"""1 standard deviation above average to 1 below average after 10 batches"""
) , )
parser.add_argument(
"""--number""" , default=100 , type=UpperCAmelCase_ , help="""The number of examples split to be used as objective_set/test_data""" )
parser.add_argument(
"""--min_len""" , default=1026 , type=UpperCAmelCase_ , help="""The minimum length of the article to be used as objective set""" )
parser.add_argument(
"""--secondary_learner_max_epochs""" , default=15 , type=UpperCAmelCase_ , help="""number of epochs to train secondary learner""" )
parser.add_argument("""--trim""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""truncate the example if it exceeds context length""" )
parser.add_argument(
"""--threshold""" , default=1.0 , type=UpperCAmelCase_ , help=(
"""The threshold value used by secondary learner to filter the train_data and allow only"""
""" informative data as input to the model"""
) , )
parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=UpperCAmelCase_ , help="""finetuned_model_name""" )
parser.add_argument(
"""--recopy_model""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl", )
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt", )
    # load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained("gpt2")
    set_seed(42)
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpta, secondary_learner=secondary_learner, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", )
if __name__ == "__main__":
main()
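# Example invocation (illustrative only; the script filename and paths are
# placeholders, but every flag below is defined in the argparse setup above):
#
#   python run_clm_igf.py \
#       --data_dir ./data \
#       --model_name_or_path gpt2 \
#       --data_file data/tokenized_stories_train_wikitext103.jbl \
#       --igf_data_file igf_context_pairs.jbl \
#       --output_dir ./igf_output \
#       --max_steps 1000 --batch_size 16 --threshold 1.0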
| 321 | 1 |
"""simple docstring"""
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
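

# For reference, a sketch of the textbook Fisher-Yates (Knuth) shuffle: it walks
# the list from the end and swaps each position with a uniformly chosen index at
# or before it. Added for illustration only; the function above instead performs
# n random pair swaps.
def fisher_yates_shuffle_classic(data: list) -> list[Any]:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # uniform index in [0, i]
        data[i], data[j] = data[j], data[i]
    return data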
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 103 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
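

# A minimal sketch (not part of the original file) of how a concrete command
# would subclass the abstract base above: register_subcommand wires the command
# into a subparsers action, and run() executes it. All names here are hypothetical.
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        # `parser` is assumed to be an argparse subparsers action
        echo_parser = parser.add_parser("echo")
        echo_parser.add_argument("text")
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def __init__(self, text):
        self.text = text

    def run(self):
        print(self.text)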
| 612 | 0 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        '''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
    def setUp(self):
        '''simple docstring'''
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_model_various_embeddings(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head_absolute_embedding(self):
        '''simple docstring'''
        model = AlbertModel.from_pretrained('''albert-base-v2''')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
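

# Illustrative sketch (not part of the test suite): a forward pass through a
# randomly initialised tiny ALBERT, mirroring what AlbertModelTester exercises.
# The config values are arbitrary small numbers chosen only to keep it fast.
if is_torch_available():
    def _tiny_albert_demo():
        config = AlbertConfig(vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=2, intermediate_size=32)
        model = AlbertModel(config)
        model.eval()
        input_ids = ids_tensor([2, 7], config.vocab_size)
        with torch.no_grad():
            outputs = model(input_ids)
        assert outputs.last_hidden_state.shape == (2, 7, config.hidden_size)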
| 716 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], F"""no matches found for the following tf keys {extra}"""
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[F"""summarization_{dataset}"""]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[F"""summarization_{dataset}"""]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
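# Example invocation (illustrative; the script filename and paths are placeholders).
# The two positional arguments match the argparse definition above:
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc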
| 475 | 0 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
__SCREAMING_SNAKE_CASE : Any = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE_ ( self :int ):
__SCREAMING_SNAKE_CASE : List[Any] = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
__SCREAMING_SNAKE_CASE : List[str] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE_ ( self :Any ):
__SCREAMING_SNAKE_CASE : Tuple = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
__SCREAMING_SNAKE_CASE : List[str] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE_ ( self :int ):
__SCREAMING_SNAKE_CASE : Optional[int] = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
__SCREAMING_SNAKE_CASE : int = '''fp16'''
self.assertTrue(is_safetensors_compatible(_lowerCamelCase , variant=_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE_ ( self :int ):
__SCREAMING_SNAKE_CASE : List[str] = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
__SCREAMING_SNAKE_CASE : List[Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(_lowerCamelCase , variant=_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
# pass variant but use the non-variant filenames
__SCREAMING_SNAKE_CASE : List[str] = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
__SCREAMING_SNAKE_CASE : Dict = '''fp16'''
self.assertTrue(is_safetensors_compatible(_lowerCamelCase , variant=_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE_ ( self :int ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
__SCREAMING_SNAKE_CASE : int = '''fp16'''
self.assertFalse(is_safetensors_compatible(_lowerCamelCase , variant=_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE_ ( self :str ):
__SCREAMING_SNAKE_CASE : Optional[int] = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
__SCREAMING_SNAKE_CASE : Any = '''fp16'''
self.assertTrue(is_safetensors_compatible(_lowerCamelCase , variant=_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
# pass variant but use the non-variant filenames
__SCREAMING_SNAKE_CASE : List[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
__SCREAMING_SNAKE_CASE : Any = '''fp16'''
self.assertTrue(is_safetensors_compatible(_lowerCamelCase , variant=_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
__SCREAMING_SNAKE_CASE : List[Any] = '''fp16'''
self.assertFalse(is_safetensors_compatible(_lowerCamelCase , variant=_lowerCamelCase ) )
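

# A minimal sketch of the rule these tests exercise (NOT the actual diffusers
# implementation, which also matches file stems and the variant infix exactly):
# a checkpoint layout is safetensors-compatible when every pipeline component
# that ships .bin weights also ships .safetensors weights.
def _sketch_is_safetensors_compatible(filenames, variant=None):
    pt_components, sf_components = set(), set()
    for name in filenames:
        component = name.split("/")[0]
        if name.endswith(".bin"):
            pt_components.add(component)
        elif name.endswith(".safetensors"):
            sf_components.add(component)
    return pt_components.issubset(sf_components)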
| 674 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self :int ):
__SCREAMING_SNAKE_CASE : Tuple = 0
def SCREAMING_SNAKE_CASE_ ( self :Any ):
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self :int ):
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE : Optional[Any] = Path(_lowerCamelCase ) / '''preprocessor_config.json'''
__SCREAMING_SNAKE_CASE : Dict = Path(_lowerCamelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowerCamelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_lowerCamelCase , '''w''' ) )
__SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE : Tuple = Path(_lowerCamelCase ) / '''preprocessor_config.json'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = Path(_lowerCamelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowerCamelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_lowerCamelCase , '''w''' ) )
__SCREAMING_SNAKE_CASE : int = AutoImageProcessor.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self :int ):
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE : str = CLIPConfig()
            # Create a dummy config file with image_processor_type
__SCREAMING_SNAKE_CASE : Tuple = Path(_lowerCamelCase ) / '''preprocessor_config.json'''
__SCREAMING_SNAKE_CASE : Any = Path(_lowerCamelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowerCamelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_lowerCamelCase , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__SCREAMING_SNAKE_CASE : int = AutoImageProcessor.from_pretrained(_lowerCamelCase ).to_dict()
config_dict.pop('''image_processor_type''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPImageProcessor(**_lowerCamelCase )
# save in new folder
model_config.save_pretrained(_lowerCamelCase )
config.save_pretrained(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained(_lowerCamelCase )
# make sure private variable is not incorrectly saved
__SCREAMING_SNAKE_CASE : Tuple = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE : Dict = Path(_lowerCamelCase ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowerCamelCase , '''w''' ) , )
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoImageProcessor.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
with self.assertRaisesRegex(
_lowerCamelCase , '''clip-base is not a local folder and is not a valid model identifier''' ):
__SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained('''clip-base''' )
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
with self.assertRaisesRegex(
_lowerCamelCase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained(_lowerCamelCase , revision='''aaaaaa''' )
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
with self.assertRaisesRegex(
_lowerCamelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
__SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : int = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_lowerCamelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained(_lowerCamelCase , trust_remote_code=_lowerCamelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
try:
AutoConfig.register('''custom''' , _lowerCamelCase )
AutoImageProcessor.register(_lowerCamelCase , _lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCamelCase ):
AutoImageProcessor.register(_lowerCamelCase , _lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE : Dict = Path(_lowerCamelCase ) / '''preprocessor_config.json'''
__SCREAMING_SNAKE_CASE : Dict = Path(_lowerCamelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowerCamelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_lowerCamelCase , '''w''' ) )
__SCREAMING_SNAKE_CASE : Optional[int] = CustomImageProcessor.from_pretrained(_lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
        class NewImageProcessor(CustomImageProcessor):
            is_local = True
try:
AutoConfig.register('''custom''' , _lowerCamelCase )
AutoImageProcessor.register(_lowerCamelCase , _lowerCamelCase )
# If remote code is not set, the default is to use local
__SCREAMING_SNAKE_CASE : List[str] = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
__SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_lowerCamelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
__SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_lowerCamelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_lowerCamelCase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
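

# A short usage sketch of the registration mechanism exercised above (illustrative,
# not part of the test file; saved_dir and tmp_dir are placeholder paths). Once a
# config class is registered together with an image processor class,
# AutoImageProcessor resolves it like any built-in one:
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#   image_processor = CustomImageProcessor.from_pretrained(saved_dir)
#   image_processor.save_pretrained(tmp_dir)
#   reloaded = AutoImageProcessor.from_pretrained(tmp_dir)  # -> CustomImageProcessor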
| 674 | 1 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    '''simple docstring'''
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
("bert-base-cased", {}),
("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)
@require_torch
@slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)
@require_torch
@slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)
@require_tf
@slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
@require_torch
@slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)
@require_torch
@require_tokenizers
@slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")
@require_tf
@require_tokenizers
@slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")
    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)
        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)
        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})
    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")
    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 538 |
"""simple docstring"""
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
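

# The DP recurrence computed above, for dimension vector p = array and
# 1 <= a <= b < n:
#
#     matrix[a][b] = min over a <= c < b of
#         matrix[a][c] + matrix[c + 1][b] + p[a - 1] * p[c] * p[b]
#
# i.e. the cheapest parenthesisation of A_a ... A_b splits at some c, pays for
# both halves, plus the final (p[a-1] x p[c]) times (p[c] x p[b]) multiplication.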
def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
| 538 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
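# Minimal usage sketch (editorial; assumes the `transformers` package, where
# CvtConfig/CvtModel are the upstream names for this config and its model):
#   from transformers import CvtConfig, CvtModel
#   config = CvtConfig(num_channels=3, embed_dim=[64, 192, 384])
#   model = CvtModel(config)  # randomly initialised CvT backbone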
| 181 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
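# Editorial note on the _LazyModule pattern above: importing this package is
# cheap because the heavy submodules are only loaded on first attribute access,
# e.g. (hypothetical usage):
#   from transformers.models import efficientnet
#   model_cls = efficientnet.EfficientNetModel  # triggers the real import here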
| 169 | 0 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
    get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_aim_available,
    is_bf16_available,
    is_bnb_available,
    is_boto3_available,
    is_ccl_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_fp8_available,
    is_ipex_available,
    is_megatron_lm_available,
    is_mlflow_available,
    is_mps_available,
    is_npu_available,
    is_rich_available,
    is_safetensors_available,
    is_sagemaker_available,
    is_tensorboard_available,
    is_tpu_available,
    is_transformers_available,
    is_wandb_available,
    is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
        T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
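# Editorial note: this module only re-exports the utilities above, so callers
# can write, e.g. (hypothetical usage):
#   from accelerate.utils import set_seed, send_to_device
#   set_seed(42)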
| 42 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim])

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim])
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f'Parameter {name} of model {model_class} seems not properly initialized',
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
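# Editorial note: with the class and method names restored above, the suite can
# be run the usual way (path assumed, adjust to your checkout):
#   python -m pytest tests/models/focalnet/test_modeling_focalnet.py -k "backbone"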
| 42 | 1 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused")
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vacuum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vacuum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"], batch_size=5)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5,
        )
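    # Minimal usage sketch of the pipeline under test (editorial; requires
    # network access to download the CLAP checkpoint):
    #   from transformers import pipeline
    #   classifier = pipeline(task="zero-shot-audio-classification",
    #                         model="laion/clap-htsat-unfused")
    #   classifier(audio_array, candidate_labels=["Sound of a dog", "Sound of a cat"])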
    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass | 8 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
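# Editorial note on the config below: unlike most HuggingFace configs,
# `num_hidden_layers` here is a dict with one entry per LXMERT encoder, e.g.
#   LxmertConfig().num_hidden_layers == {"vision": 5, "cross_encoder": 5, "language": 9}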
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs) | 8 | 1 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        # one-hot encode the labels, then take the negative log-likelihood
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
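# Quick numeric check of the one-hot cross-entropy above (editorial): for
# logits [[2.0, 0.5, 0.1]] and label [0], softmax gives p(0) ≈ 0.7285, so the
# mean loss is -ln(0.7285) ≈ 0.317.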
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3_000
    save_steps: int = 10_500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20_000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4_096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
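# Editorial note: both step functions are jax.pmap-ed over the "batch" axis, so
# every array in `model_inputs` must carry a leading device dimension —
# DataCollator.__call__ above takes care of that via `shard`.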
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq, )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state, )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # apply weight decay to everything except biases and LayerNorm scales
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)

    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
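# Editorial sanity check of the schedule built above: with init_lr=0.0,
# lr=3e-5 and warmup_steps=20000, the learning rate ramps linearly from 0 to
# 3e-5 over the first 20k steps, then decays linearly toward 1e-7.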
| 708 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case =["""ChineseCLIPFeatureExtractor"""]
__snake_case =["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 513 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class LDMSuperResolutionPipeline(DiffusionPipeline):
    """simple docstring"""

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
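# Minimal usage sketch (editorial; checkpoint id assumed, requires `diffusers`):
#   from diffusers import LDMSuperResolutionPipeline
#   pipe = LDMSuperResolutionPipeline.from_pretrained(
#       "CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipe(low_res_image, num_inference_steps=100, eta=1.0).images[0]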
| 61 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 61 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """simple docstring"""
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    """simple docstring"""
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    """simple docstring"""
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
__lowerCAmelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
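    # Example invocation (editorial; all paths are hypothetical):
    #   python convert_unispeech_sat_checkpoint.py \
    #       --base_model_name microsoft/unispeech-sat-base \
    #       --config_path ./config.json \
    #       --checkpoint_path ./s3prl_checkpoint.pt \
    #       --model_dump_path ./converted_model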
| 703 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    """simple docstring"""
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
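    # e.g. `diffusers-cli env` resolves to EnvironmentCommand and prints the
    # local environment info (editorial note).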
| 319 | 0 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"ctrl": 256,
}
CONTROL_CODES = {
"Pregnancy": 16_8629,
"Christianity": 7675,
"Explain": 10_6423,
"Fitness": 6_3440,
"Saving": 6_3163,
"Ask": 2_7171,
"Ass": 9_5985,
"Joke": 16_3509,
"Questions": 4_5622,
"Thoughts": 4_9605,
"Retail": 5_2342,
"Feminism": 16_4338,
"Writing": 1_1992,
"Atheism": 19_2263,
"Netflix": 4_8616,
"Computing": 3_9639,
"Opinion": 4_3213,
"Alone": 4_4967,
"Funny": 5_8917,
"Gaming": 4_0358,
"Human": 4088,
"India": 1331,
"Joker": 7_7138,
"Diet": 3_6206,
"Legal": 1_1859,
"Norman": 4939,
"Tip": 7_2689,
"Weight": 5_2343,
"Movies": 4_6273,
"Running": 2_3425,
"Science": 2090,
"Horror": 3_7793,
"Confession": 6_0572,
"Finance": 1_2250,
"Politics": 1_6360,
"Scary": 19_1985,
"Support": 1_2654,
"Technologies": 3_2516,
"Teenage": 6_6160,
"Event": 3_2769,
"Learned": 6_7460,
"Notion": 18_2770,
"Wikipedia": 3_7583,
"Books": 6665,
"Extract": 7_6050,
"Confessions": 10_2701,
"Conspiracy": 7_5932,
"Links": 6_3674,
"Narcissus": 15_0425,
"Relationship": 5_4766,
"Relationships": 13_4796,
"Reviews": 4_1671,
"News": 4256,
"Translation": 2_6820,
"multilingual": 12_8406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where the word is a
    tuple of symbols (symbols being variable-length strings)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
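# e.g. get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")} — each adjacent
# symbol pair appears once (editorial example).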
class CTRLTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
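# --- Illustrative sketch (not part of the original tokenizer) ----------------
# A self-contained toy version of the merge loop in `bpe` above. The rank
# table `_toy_ranks` is a made-up stand-in for the merges file.
def _toy_get_pairs(word):
    # all adjacent symbol pairs of a word held as a tuple of symbols
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

_toy_ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}  # assumed merge priorities

def _toy_bpe(token):
    word = tuple(token[:-1]) + (token[-1] + "</w>",)
    while len(word) > 1:
        bigram = min(_toy_get_pairs(word), key=lambda pair: _toy_ranks.get(pair, float("inf")))
        if bigram not in _toy_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return "@@ ".join(word)[:-4]

# _toy_bpe("low") == "low": ("l", "o") merges first, then ("lo", "w</w>").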
| 253 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image() -> "Image.Image":
    '''simple docstring'''
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )
    return image
def create_rename_keys( config ) -> list:
    '''simple docstring'''
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") )
# fmt: on
return rename_keys
def rename_key( dct , old , new ) -> None:
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias( state_dict , config ) -> None:
    '''simple docstring'''
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' )
        v_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f'visual_encoder.blocks.{i}.attn.qkv.bias'] = qkv_bias
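# Illustrative check (not from the original script): the fused qkv bias is the
# q bias, a zero k bias and the v bias concatenated in that order; the made-up
# shapes below only demonstrate the layout.
_q_demo = torch.ones(2)
_v_demo = torch.full((2,), 2.0)
assert torch.cat((_q_demo, torch.zeros_like(_v_demo), _v_demo)).tolist() == [1.0, 1.0, 0.0, 0.0, 2.0, 2.0]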
def get_blipa_config( model_name , eos_token_id=None ) -> Tuple:
    '''simple docstring'''
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b" , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b" , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ) -> None:
    '''simple docstring'''
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b" )
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl" )
    )
    eos_token_id = tokenizer("\n" , add_special_tokens=False ).input_ids[0]
    config, image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
    model_name_to_original = {
"blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
"blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
"blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
"blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
"blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
"blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
"blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
}
    name, type = model_name_to_original[model_name]
    # load original model
    print("Loading original model..." )
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name , model_type=type , is_eval=True , device=device )
    original_model.eval()
    print("Done!" )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("Qformer.bert" ):
            key = key.replace("Qformer.bert" , "qformer" )
        if "attention.self" in key:
            key = key.replace("self" , "attention" )
        if "opt_proj" in key:
            key = key.replace("opt_proj" , "language_projection" )
        if "t5_proj" in key:
            key = key.replace("t5_proj" , "language_projection" )
        if key.startswith("opt" ):
            key = key.replace("opt" , "language" )
        if key.startswith("t5" ):
            key = key.replace("t5" , "language" )
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict , config )
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(["\n"] , return_tensors="pt" ).input_ids.to(device )
    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors="pt" ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values , pixel_values )
    original_model.to(device )
    hf_model.to(device )
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits
    assert original_logits.shape == logits.shape
    print("First values of original logits:" , original_logits[0, :3, :3] )
    print("First values of HF logits:" , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4 )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ) , logits , atol=1E-2 )
    print("Looks ok!" )
    print("Generating a caption..." )
    prompt = ""
    input_ids = tokenizer(prompt , return_tensors="pt" ).input_ids.to(device )
    original_outputs = original_model.generate({"image": original_pixel_values} )
    outputs = hf_model.generate(
        pixel_values , input_ids , do_sample=False , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print("Original generation:" , original_outputs )
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print("HF generation:" , output_text )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        processor.push_to_hub(f'nielsr/{model_name}' )
        hf_model.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
        help="Name of the BLIP-2 model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
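# Example invocation (illustrative; the script name and output path are
# assumptions, not taken from this file):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b-converted
#
# This loads the LAVIS checkpoint, remaps its state dict into the Hugging Face
# layout, checks that the logits agree within tolerance, and saves the
# converted processor and model.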
| 289 | 0 |
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 5_0003
PYTHON_CODE = 5_0002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp( self ) -> None:
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes="""base""" , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
    def test_full_base_tokenizer( self ) -> None:
        '''simple docstring'''
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes="""base""" , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 4 , end )]
        self.assertListEqual(language_tokens , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
        code = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
        input_ids = tokenizer(code ).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids , skip_special_tokens=True , clean_up_tokenization_spaces=False ) , code , )
    def test_full_multi_tokenizer( self ) -> None:
        '''simple docstring'''
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB , language_codes="""multi""" , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x ) for x in range(end - 7 , end )]
        self.assertListEqual(
            language_tokens , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
        code = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
        input_ids = tokenizer(code ).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids , skip_special_tokens=True , clean_up_tokenization_spaces=False ) , code , )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest( unittest.TestCase ):
    checkpoint_name = 'uclanlp/plbart-python-en_XX'
    src_text = [
        'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
        'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
    ]
    tgt_text = [
        'Returns the maximum value of a b c.',
        'Sums the values of a b c.',
    ]
    expected_src_tokens = [
134,
5_452,
33_460,
33_441,
33_463,
33_465,
33_463,
33_449,
988,
20,
33_456,
19,
33_456,
771,
39,
4_258,
889,
3_318,
33_441,
33_463,
33_465,
33_463,
33_449,
2_471,
2,
PYTHON_CODE,
]
@classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
        cls.pad_token_id = 1
return cls
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_00_03 )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
        self.assertIn(EN_CODE , self.tokenizer.all_special_ids )
        generated_ids = [EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_english = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_english )
        self.assertNotIn(self.tokenizer.eos_token , result )
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
        src_text = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
        self.assertIsInstance(src_text[0] , str )
        desired_max_length = 10
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , EN_CODE )
        self.assertEqual(len(ids ) , desired_max_length )
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_00_04, 5_00_01] )
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="""pt""" )
        batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
        self.assertEqual(batch.decoder_input_ids[1][0] , EN_CODE )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
        batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 26) , batch.input_ids.shape )
        self.assertEqual((2, 26) , batch.attention_mask.shape )
        src_ids = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , src_ids )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowerCAmelCase_ ( self ) -> Dict:
'''simple docstring'''
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="""pt""" )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors="""pt""" )
        labels = targets["""input_ids"""]
        batch["""decoder_input_ids"""] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
        inputs = self.tokenizer._build_translation_inputs(
            """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
        self.assertEqual(
            nested_simplify(inputs ) , {
# A, test, EOS, en_XX
"""input_ids""": [[1_50, 2_42, 2, 5_00_03]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 5_00_01,
} , )
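# --- Illustrative sketch (not part of the test file) --------------------------
# A minimal stand-in for the `shift_tokens_right` helper the tests above rely
# on: for multilingual BART-style models the last non-pad token (the language
# code) moves to position 0 and everything else shifts right by one.
def _toy_shift_tokens_right(input_ids, pad_token_id):
    import torch
    prev_output_tokens = input_ids.clone()
    index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start_tokens = input_ids.gather(1, index_of_eos).squeeze()
    prev_output_tokens[:, 1:] = input_ids[:, :-1].clone()
    prev_output_tokens[:, 0] = decoder_start_tokens
    return prev_output_tokens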
| 702 | from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments( BenchmarkArguments ):
    deprecated_args = [
        'no_inference',
        'no_cuda',
        'no_tpu',
        'no_speed',
        'no_memory',
        'no_env_print',
        'no_multi_process',
    ]
    def __init__( self , **kwargs ) -> None:
        '''simple docstring'''
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                positive_value = not kwargs.pop(deprecated_arg )
                setattr(self , positive_arg , positive_value )
                logger.warning(
                    F"{deprecated_arg} is deprecated. Please use"
                    F" {positive_arg}={positive_value} instead" )
        self.torchscript = kwargs.pop("""torchscript""" , self.torchscript )
        self.torch_xla_tpu_print_metrics = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics )
        self.fp16_opt_level = kwargs.pop("""fp16_opt_level""" , self.fp16_opt_level )
        super().__init__(**kwargs )
    torchscript: bool = field(default=False , metadata={'help': 'Trace the models using torchscript'} )
    torch_xla_tpu_print_metrics: bool = field(default=False , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
    fp16_opt_level: str = field(
        default='O1' , metadata={
            'help': (
                'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
                'See details at https://nvidia.github.io/apex/amp.html'
            )
        } , )
@cached_property
    def _setup_devices( self ) -> Tuple["torch.device", int]:
        '''simple docstring'''
        requires_backends(self , ["""torch"""] )
        logger.info("""PyTorch: setting up devices""" )
        if not self.cuda:
            device = torch.device("""cpu""" )
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
    @property
    def is_tpu( self ) -> bool:
        '''simple docstring'''
        return is_torch_tpu_available() and self.tpu
    @property
    def device_idx( self ) -> int:
        '''simple docstring'''
        requires_backends(self , ["""torch"""] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()
    @property
    def device( self ) -> "torch.device":
        '''simple docstring'''
        requires_backends(self , ["""torch"""] )
        return self._setup_devices[0]
    @property
    def n_gpu( self ) -> int:
        '''simple docstring'''
        requires_backends(self , ["""torch"""] )
        return self._setup_devices[1]
    @property
    def is_gpu( self ) -> bool:
        '''simple docstring'''
        return self.n_gpu > 0
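# --- Illustrative sketch (not part of this module) ----------------------------
# The constructor above inverts deprecated `no_*` flags into their positive
# counterparts; a stripped-down version of that pattern:
class _DemoArgs:
    deprecated_args = ["no_speed"]
    speed = True

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # drop the "no_" prefix
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))

assert _DemoArgs(no_speed=True).speed is False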
| 164 | 0 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    def __init__( self , *, # begin keyword-only arguments
                 bos="<s>" , pad="<pad>" , eos="</s>" , unk="<unk>" , extra_special_symbols=None , ):
        '''simple docstring'''
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos )
        self.pad_index = self.add_symbol(pad )
        self.eos_index = self.add_symbol(eos )
        self.unk_index = self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial = len(self.symbols )
    def __eq__( self , other ):
        '''simple docstring'''
        return self.indices == other.indices
    def __getitem__( self , idx ):
        '''simple docstring'''
        if idx < len(self.symbols ):
            return self.symbols[idx]
        return self.unk_word
    def __len__( self ):
        '''simple docstring'''
        return len(self.symbols )
    def __contains__( self , sym ):
        '''simple docstring'''
        return sym in self.indices
    @classmethod
    def load( cls , f ):
        '''simple docstring'''
        d = cls()
        d.add_from_file(f )
        return d
    def add_symbol( self , word , n=1 , overwrite=False ):
        '''simple docstring'''
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols )
            self.indices[word] = idx
            self.symbols.append(word )
            self.count.append(n )
            return idx
    def _load_meta( self , lines ):
        '''simple docstring'''
        return 0
    def add_from_file( self , f ):
        '''simple docstring'''
        if isinstance(f , str ):
            try:
                with open(f , '''r''' , encoding='''utf-8''' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(f ) )
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines )
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(''' ''' , 1 )
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(''' ''' , 1 )
                else:
                    overwrite = False
                count = int(field )
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        '''Duplicate word found when loading Dictionary: \'{}\'. '''
                        '''Duplicate words can overwrite earlier ones by adding the '''
                        '''#fairseq:overwrite flag at the end of the corresponding row '''
                        '''in the dictionary file. If using the Camembert model, please '''
                        '''download an updated copy of the model file.'''.format(word ) )
                self.add_symbol(word , n=count , overwrite=overwrite )
            except ValueError:
                raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''' )
def rewrite_dict_keys(d ):
    # (1) strip the fairseq word-break marker, (2) add a word-end marker where the word is not broken up
    da = dict((re.sub(R'''@@$''' , '''''' , k ), v) if k.endswith('''@@''' ) else (re.sub(R'''$''' , '''</w>''' , k ), v) for k, v in d.items() )
    keep_keys = '''<s> <pad> </s> <unk>'''.split()
    # restore the special tokens
    for k in keep_keys:
        del da[F"""{k}</w>"""]
        da[k] = d[k]  # restore
    return da
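# Quick illustration (not part of the conversion script) of the rewrite above:
# fairseq marks word-internal pieces with a trailing "@@", while the HF BioGPT
# vocab marks word-final pieces with "</w>". The special tokens are restored.
#
# rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hel@@": 4, "lo": 5})
#   -> {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hel": 4, "lo</w>": 5}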
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path , pytorch_dump_folder_path ):
    # prep
    if not os.path.exists(biogpt_checkpoint_path ):
        raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""" )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    print(F"""Writing results to {pytorch_dump_folder_path}""" )
    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path , '''checkpoint.pt''' )
    if not os.path.isfile(checkpoint_file ):
        raise ValueError(F"""path to the file {checkpoint_file} does not exist!""" )
    chkpt = torch.load(checkpoint_file , map_location='''cpu''' )
    args = chkpt['''cfg''']['''model''']
    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path , '''dict.txt''' )
    if not os.path.isfile(dict_file ):
        raise ValueError(F"""path to the file {dict_file} does not exist!""" )
    src_dict = Dictionary.load(dict_file )
    src_vocab = rewrite_dict_keys(src_dict.indices )
    src_vocab_size = len(src_vocab )
    src_vocab_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES['''vocab_file'''] )
    print(F"""Generating {src_vocab_file} of {src_vocab_size} records""" )
    with open(src_vocab_file , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(src_vocab , ensure_ascii=False , indent=json_indent ) )
    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path , '''bpecodes''' )
    if not os.path.isfile(bpecodes_file ):
        raise ValueError(F"""path to the file {bpecodes_file} does not exist!""" )
    merges_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES['''merges_file'''] )
    shutil.copyfile(bpecodes_file , merges_file )
    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path , '''config.json''' )
    model_conf = {
'''activation_dropout''': args['''activation_dropout'''],
'''architectures''': ['''BioGptForCausalLM'''],
'''attention_probs_dropout_prob''': args['''attention_dropout'''],
'''bos_token_id''': 0,
'''eos_token_id''': 2,
'''hidden_act''': args['''activation_fn'''],
'''hidden_dropout_prob''': args['''dropout'''],
'''hidden_size''': args['''decoder_embed_dim'''],
'''initializer_range''': 0.02,
'''intermediate_size''': args['''decoder_ffn_embed_dim'''],
'''layer_norm_eps''': 1e-12,
'''layerdrop''': args['''decoder_layerdrop'''],
'''max_position_embeddings''': args['''max_target_positions'''],
'''model_type''': '''biogpt''',
'''num_attention_heads''': args['''decoder_attention_heads'''],
'''num_hidden_layers''': args['''decoder_layers'''],
'''pad_token_id''': 1,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_decoder_input_output_embed'''],
'''vocab_size''': src_vocab_size,
}
    # good hparam defaults to start with
    print(F"""Generating {biogpt_model_config_file}""" )
    with open(biogpt_model_config_file , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(model_conf , ensure_ascii=False , indent=json_indent ) )
    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path , TOKENIZER_CONFIG_FILE )
    tokenizer_conf = {
        '''bos_token''': '''<s>''',
        '''eos_token''': '''</s>''',
        '''model_max_length''': 10_24,
        '''pad_token''': '''<pad>''',
        '''special_tokens_map_file''': None,
        '''tokenizer_class''': '''BioGptTokenizer''',
        '''unk_token''': '''<unk>''',
    }
    print(F"""Generating {biogpt_tokenizer_config_file}""" )
    with open(biogpt_tokenizer_config_file , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(tokenizer_conf , ensure_ascii=False , indent=json_indent ) )
    # model
    model_state_dict = chkpt['''model''']
    # remove unneeded keys
    ignore_keys = [
        '''decoder.version''',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k , None )
    layer_names = list(model_state_dict.keys() )
    for layer_name in layer_names:
        if layer_name.endswith('''output_projection.weight''' ):
            model_state_dict[layer_name.replace('''decoder.''' , '''''' )] = model_state_dict.pop(layer_name )
        else:
            model_state_dict[layer_name.replace('''decoder''' , '''biogpt''' )] = model_state_dict.pop(layer_name )
    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path )
    model_new = BioGptForCausalLM(config )
    # check that it loads ok
    model_new.load_state_dict(model_state_dict )
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    print(F"""Generating {pytorch_weights_dump_path}""" )
    torch.save(model_state_dict , pytorch_weights_dump_path )
    print('''Conversion is done!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
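# Example invocation (illustrative; the script name and paths are assumptions).
# The checkpoint directory must contain checkpoint.pt, dict.txt and bpecodes,
# as read by the conversion function above:
#
#   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#       --biogpt_checkpoint_path ./biogpt-original \
#       --pytorch_dump_folder_path ./biogpt-converted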
| 175 |
'''simple docstring'''
def combination_util(arr , n , r , index , data , i ):
    '''simple docstring'''
    if index == r:
        for j in range(r ):
            print(data[j] , end=" " )
        print(" " )
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr , n , r , index + 1 , data , i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr , n , r , index , data , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr , n , r ):
    '''simple docstring'''
    # A temporary array to store all combinations one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr , n , r , 0 , data , 0 )
if __name__ == "__main__":
# Driver code to check the function above
SCREAMING_SNAKE_CASE__ = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
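# Sanity sketch (not part of the original snippet): the recursion above visits
# the same r-subsets, in the same order, as itertools.combinations.
from itertools import combinations

assert list(combinations([10, 20, 30], 2)) == [(10, 20), (10, 30), (20, 30)]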
| 267 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/xglm-564M': 2048,
}
class XGLMTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs = None , **kwargs , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
        kwargs["additional_special_tokens"] = kwargs.get('additional_special_tokens' , [] ) or []
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        sp_size = len(self.sp_model )
        madeup_words_dict = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
        self.fairseq_tokens_to_ids.update(madeup_words_dict )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ):
        if token_ids_b is None:
            return [self.sep_token_id] + token_ids_a
        sep = [self.sep_token_id]
        return sep + token_ids_a + sep + sep + token_ids_b
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a ))
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b ))
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        if token_ids_b is None:
            return len(sep + token_ids_a ) * [0]
        return len(sep + token_ids_a + sep + sep + token_ids_b ) * [0]
    @property
    def vocab_size( self ):
        return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
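# --- Illustrative sketch (not part of the tokenizer) --------------------------
# How the fairseq offset above maps raw SentencePiece ids onto the fairseq
# vocabulary layout: spm id 0 is spm's own <unk>, everything else shifts up.
_demo_fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_demo_offset = 1

def _demo_spm_to_fairseq(spm_id):
    return spm_id + _demo_offset if spm_id else _demo_fairseq_tokens_to_ids["<unk>"]

assert _demo_spm_to_fairseq(0) == 3 and _demo_spm_to_fairseq(4) == 5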
| 153 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class LevitImageProcessor( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ['pixel_values']
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = IMAGENET_DEFAULT_MEAN , image_std = IMAGENET_DEFAULT_STD , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
        size_dict = get_size_dict(size , default_to_square=False )
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size['shortest_edge'] )
            output_size = get_resize_output_image_size(image , size=shortest_edge , default_to_square=False )
            size_dict = {'height': output_size[0], 'width': output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                F"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
        return resize(
            image , size=(size_dict['height'], size_dict['width']) , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image , size , resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image , crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image , rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image , image_mean , image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
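# --- Illustrative sketch (not part of this processor) -------------------------
# The tail of the preprocess pipeline above (rescale then normalize) on a toy
# numpy array; the statistics and scale are made up for the example.
import numpy as np

def _toy_rescale_normalize(image, mean=0.5, std=0.5, scale=1 / 255):
    image = image.astype("float32") * scale  # rescale to [0, 1]
    return (image - mean) / std              # normalize

assert _toy_rescale_normalize(np.full((3, 2, 2), 255)).max() == 1.0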
| 153 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
_a = logging.get_logger(__name__)
_a = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig( PretrainedConfig ):
    model_type = "perceiver"
    def __init__( self , num_latents=2_56 , d_latents=12_80 , d_model=7_68 , num_blocks=1 , num_self_attends_per_block=26 , num_self_attention_heads=8 , num_cross_attention_heads=8 , qk_channels=None , v_channels=None , cross_attention_shape_for_attention="kv" , self_attention_widening_factor=1 , cross_attention_widening_factor=1 , hidden_act="gelu" , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_query_residual=True , vocab_size=2_62 , max_position_embeddings=20_48 , image_size=56 , train_size=[3_68, 4_96] , num_frames=16 , audio_samples_per_frame=19_20 , samples_per_patch=16 , output_shape=[1, 16, 2_24, 2_24] , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig( OnnxConfig ):
    @property
    def inputs( self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ('''inputs''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ])
    @property
    def atol_for_validation( self) -> float:
        '''simple docstring'''
        return 1e-4
    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , num_choices = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 40 , image_height = 40 , ) -> Mapping[str, Any]:
        '''simple docstring'''
        if isinstance(preprocessor , PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add)
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [""" """.join(['''a''']) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input , return_tensors=framework))
            inputs['''inputs'''] = inputs.pop('''input_ids''')
            return inputs
        elif isinstance(preprocessor , FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size , fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size , num_channels , image_height , image_width)
            inputs = dict(preprocessor(images=dummy_input , return_tensors=framework))
            inputs['''inputs'''] = inputs.pop('''pixel_values''')
            return inputs
        else:
            raise ValueError(
                '''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''')
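# Illustrative note (not part of the config): the dummy-input helper above
# replaces a dynamic (-1) axis with a small fixed size before ONNX tracing.
# A toy version of that fallback:
def _toy_effective_dim(dimension, fixed_dimension, num_token_to_add=0):
    if dimension <= 0:
        dimension = fixed_dimension
    return dimension - num_token_to_add

assert _toy_effective_dim(-1, 8, num_token_to_add=2) == 6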
| 19 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset ):
    '''simple docstring'''
    def __init__( self , p_stop=0.01 , max_length=10_00 ):
        '''simple docstring'''
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__( self ):
        '''simple docstring'''
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
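# Illustrative usage (not part of the test file): the dataset above yields an
# unpredictable number of items, which is exactly the situation the sharded
# data loader tests in this file have to cope with.
# random.seed(42)
# list(RandomIterableDataset(p_stop=0.5, max_length=4))  # e.g. [0, 1] (length varies)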
class BatchSamplerShardTest( unittest.TestCase):
    '''simple docstring'''
    def check_batch_sampler_shards( self , batch_sampler , expected , split_batches=False , even_batches=True ):
        '''simple docstring'''
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler , 2 , i , split_batches=split_batches , even_batches=even_batches )
            for i in range(2 )
        ]
        batch_sampler_lists = [list(batch_sampler_shard ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard ) for shard in batch_sampler_shards] , [len(e ) for e in expected] )
        self.assertListEqual(batch_sampler_lists , expected )
    def test_batch_sampler_shards_with_no_splits( self ):
        '''simple docstring'''
        batch_sampler = BatchSampler(range(24 ) , batch_size=3 , drop_last=False )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected )

        batch_sampler = BatchSampler(range(24 ) , batch_size=3 , drop_last=True )
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler , expected )

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21 ) , batch_size=3 , drop_last=False )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected )

        batch_sampler = BatchSampler(range(21 ) , batch_size=3 , drop_last=True )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected )

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22 ) , batch_size=3 , drop_last=False )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected )

        batch_sampler = BatchSampler(range(22 ) , batch_size=3 , drop_last=True )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected )

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20 ) , batch_size=3 , drop_last=False )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected )

        batch_sampler = BatchSampler(range(20 ) , batch_size=3 , drop_last=True )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected )

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2 ) , batch_size=3 , drop_last=False )
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler , expected )

        batch_sampler = BatchSampler(range(2 ) , batch_size=3 , drop_last=True )
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler , expected )
    def test_batch_sampler_shards_with_splits( self ):
        '''simple docstring'''
        batch_sampler = BatchSampler(range(24 ) , batch_size=4 , drop_last=False )
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True )

        batch_sampler = BatchSampler(range(24 ) , batch_size=4 , drop_last=True )
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True )

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22 ) , batch_size=4 , drop_last=False )
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True )

        batch_sampler = BatchSampler(range(22 ) , batch_size=4 , drop_last=True )
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True )

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21 ) , batch_size=4 , drop_last=False )
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True )

        batch_sampler = BatchSampler(range(21 ) , batch_size=4 , drop_last=True )
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True )

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2 ) , batch_size=4 , drop_last=False )
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True )

        batch_sampler = BatchSampler(range(2 ) , batch_size=4 , drop_last=True )
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True )
    def test_batch_sampler_shards_with_no_splits_no_even( self ):
        '''simple docstring'''
        batch_sampler = BatchSampler(range(24 ) , batch_size=3 , drop_last=False )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , even_batches=False )

        batch_sampler = BatchSampler(range(24 ) , batch_size=3 , drop_last=True )
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler , expected , even_batches=False )

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21 ) , batch_size=3 , drop_last=False )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , even_batches=False )

        batch_sampler = BatchSampler(range(21 ) , batch_size=3 , drop_last=True )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , even_batches=False )

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22 ) , batch_size=3 , drop_last=False )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , even_batches=False )

        batch_sampler = BatchSampler(range(22 ) , batch_size=3 , drop_last=True )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , even_batches=False )

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20 ) , batch_size=3 , drop_last=False )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , even_batches=False )

        batch_sampler = BatchSampler(range(20 ) , batch_size=3 , drop_last=True )
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , even_batches=False )

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2 ) , batch_size=3 , drop_last=False )
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler , expected , even_batches=False )

        batch_sampler = BatchSampler(range(2 ) , batch_size=3 , drop_last=True )
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler , expected , even_batches=False )
    def test_batch_sampler_shards_with_splits_no_even( self ):
        '''simple docstring'''
        batch_sampler = BatchSampler(range(24 ) , batch_size=4 , drop_last=False )
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True , even_batches=False )

        batch_sampler = BatchSampler(range(24 ) , batch_size=4 , drop_last=True )
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True , even_batches=False )

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22 ) , batch_size=4 , drop_last=False )
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True , even_batches=False )

        batch_sampler = BatchSampler(range(22 ) , batch_size=4 , drop_last=True )
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True , even_batches=False )

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21 ) , batch_size=4 , drop_last=False )
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True , even_batches=False )

        batch_sampler = BatchSampler(range(21 ) , batch_size=4 , drop_last=True )
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True , even_batches=False )

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2 ) , batch_size=4 , drop_last=False )
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True , even_batches=False )

        batch_sampler = BatchSampler(range(2 ) , batch_size=4 , drop_last=True )
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler , expected , split_batches=True , even_batches=False )
    def test_batch_sampler_with_varying_batch_size( self ):
        '''simple docstring'''
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler , 2 , i , even_batches=False ) for i in range(2 )]
        self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
        self.assertEqual(len(batch_sampler_shards[1] ) , 2 )

        self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
        self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    def check_iterable_dataset_shards( self , dataset , seed , batch_size , drop_last=False , num_processes=2 , split_batches=False ):
        '''simple docstring'''
        random.seed(seed )
        reference = list(dataset )

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset , batch_size=batch_size , drop_last=drop_last , num_processes=num_processes , process_index=i , split_batches=split_batches , )
            for i in range(num_processes )
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed )
            iterable_dataset_lists.append(list(iterable_dataset_shard ) )

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l ) , len(first_list ) )
            self.assertTrue(len(l ) % shard_batch_size == 0 )

        observed = []
        for idx in range(0 , len(first_list ) , shard_batch_size ):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference ) < len(observed ):
                reference += reference
        self.assertListEqual(observed , reference[: len(observed )] )
    def test_iterable_dataset_shard( self ):
        '''simple docstring'''
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2 )

        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
    def test_skip_batch_sampler( self ):
        '''simple docstring'''
        batch_sampler = BatchSampler(range(16 ) , batch_size=4 , drop_last=False )
        new_batch_sampler = SkipBatchSampler(batch_sampler , 2 )
        self.assertListEqual(list(new_batch_sampler ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )

    def test_skip_data_loader( self ):
        '''simple docstring'''
        dataloader = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
        self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )

    def test_skip_first_batches( self ):
        '''simple docstring'''
        dataloader = DataLoader(list(range(16 ) ) , batch_size=4 )
        new_dataloader = skip_first_batches(dataloader , num_batches=2 )
        self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )

    def test_end_of_dataloader( self ):
        '''simple docstring'''
        dataloader = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )

    def test_end_of_dataloader_dispatcher( self ):
        '''simple docstring'''
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16 ) , batch_size=4 )
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 455 | 0 |
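# Usage sketch for the sharding behaviour exercised above (assumes torch and
# accelerate are installed; values mirror the test expectations).
from torch.utils.data import BatchSampler
from accelerate.data_loader import BatchSamplerShard

sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
shards = [BatchSamplerShard(sampler, num_processes=2, process_index=i) for i in range(2)]
for i, shard in enumerate(shards):
    print(i, list(shard))
# With even_batches=True (the default), shard 1 completes its last batch by
# wrapping around to the first samples, yielding [21, 0, 1] as asserted above.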
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    # avoid stretched display of the snowflake
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors) | 113 |
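# Quick sanity check of the helpers above, assuming they are importable from
# the same module: one iteration_step turns each segment into four, and
# rotate() applies a plain 2-D rotation matrix via numpy.dot.
segment = [numpy.array([0, 0]), numpy.array([1, 0])]
print(len(iteration_step(segment)))     # 5 points, i.e. 4 segments
print(rotate(numpy.array([1, 0]), 90))  # ~[0., 1.] up to floating-point error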
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setUp(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args)

        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args) | 113 | 1 |
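# The argv-patching pattern used by run_and_check above, in miniature: drive a
# script's main() in-process by swapping sys.argv inside a context manager.
# `main` and its flags here are hypothetical stand-ins, not the real script.
import sys
from unittest.mock import patch

def main():
    print("argv seen by main():", sys.argv[1:])

testargs = ["prog", "--do_eval", "--max_seq_length", "128"]
with patch.object(sys, "argv", testargs):
    main()  # sees only the fake argv while the patch is active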
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
"""simple docstring"""
    model_type = 'marian'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=58101 , decoder_vocab_size=None , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=58100 , scale_embedding=False , pad_token_id=58100 , eos_token_id=0 , forced_eos_token_id=0 , share_encoder_decoder_embeddings=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
"""simple docstring"""
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )

            if self.use_past:
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs , direction='inputs' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast , self ).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer , batch_size , seq_length , is_pair , framework )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer , batch_size , decoder_seq_length , is_pair , framework )
        decoder_inputs = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs , **decoder_inputs )

        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            batch, encoder_seq_length = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs['decoder_attention_mask'] = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(batch , decoder_past_length )] , dim=1 )

            common_inputs['past_key_values'] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers , num_decoder_layers )
            max_num_layers = max(num_encoder_layers , num_decoder_layers ) - min_num_layers
            remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'

            for _ in range(min_num_layers ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape ),
                        torch.zeros(decoder_shape ),
                        torch.zeros(encoder_shape ),
                        torch.zeros(encoder_shape ),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(min_num_layers , max_num_layers ):
                common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer , batch_size , seq_length , is_pair , framework )

        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            batch, seqlen = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs['attention_mask'] = torch.cat(
                [common_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
            common_inputs['past_key_values'] = [
                (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(num_encoder_layers )
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input , return_tensors=framework ) )
        return common_inputs
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        return common_inputs
    def _flatten_past_key_values_(self , flattened_output , name , idx , t ):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output , name , idx , t )
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast , self )._flatten_past_key_values_(
                flattened_output , name , idx , t )

    @property
    def atol_for_validation(self) -> float:
        return 1E-4
| 434 |
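# Shape arithmetic behind the past_key_values dummies above, with made-up
# Marian-like numbers: every cached key/value tensor is
# (batch, num_heads, past_length, hidden_size // num_heads).
batch, decoder_seq_length = 2, 8
num_heads, hidden_size = 16, 1024
decoder_past_length = decoder_seq_length + 3  # the "+ 3" used in the seq2seq branch
decoder_shape = (batch, num_heads, decoder_past_length, hidden_size // num_heads)
print(decoder_shape)  # (2, 16, 11, 64)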
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = 'ResNetConfig'

# Base docstring
_CHECKPOINT_FOR_DOC = 'microsoft/resnet-50'
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'microsoft/resnet-50'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tiger cat'

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'microsoft/resnet-50',
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__( self , in_channels , out_channels , kernel_size = 3 , stride = 1 , activation = "relu" ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels , out_channels , kernel_size=kernel_size , stride=stride , padding=kernel_size // 2 , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward( self , input ):
        hidden_state = self.convolution(input )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
class ResNetEmbeddings(nn.Module):
    def __init__( self , config ):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
        self.pooler = nn.MaxPool2d(kernel_size=3 , stride=2 , padding=1 )
        self.num_channels = config.num_channels

    def forward( self , pixel_values ):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
        embedding = self.embedder(pixel_values )
        embedding = self.pooler(embedding )
        return embedding
class ResNetShortCut(nn.Module):
    def __init__( self , in_channels , out_channels , stride = 2 ):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels , out_channels , kernel_size=1 , stride=stride , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )

    def forward( self , input ):
        hidden_state = self.convolution(input )
        hidden_state = self.normalization(hidden_state )
        return hidden_state
class ResNetBasicLayer(nn.Module):
    def __init__( self , in_channels , out_channels , stride = 1 , activation = "relu" ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels , out_channels , stride=stride ) , ResNetConvLayer(out_channels , out_channels , activation=None ) , )
        self.activation = ACT2FN[activation]

    def forward( self , hidden_state ):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    def __init__( self , in_channels , out_channels , stride = 1 , activation = "relu" , reduction = 4 ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels , reduces_channels , kernel_size=1 ) , ResNetConvLayer(reduces_channels , reduces_channels , stride=stride ) , ResNetConvLayer(reduces_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACT2FN[activation]

    def forward( self , hidden_state ):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class ResNetStage(nn.Module):
    def __init__( self , config , in_channels , out_channels , stride = 2 , depth = 2 , ):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels , out_channels , stride=stride , activation=config.hidden_act ) , *[layer(out_channels , out_channels , activation=config.hidden_act ) for _ in range(depth - 1 )] , )

    def forward( self , input ):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state )
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__( self , config ):
        super().__init__()
        self.stages = nn.ModuleList([] )
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:] ):
            self.stages.append(ResNetStage(config , in_channels , out_channels , depth=depth ) )

    def forward( self , hidden_state , output_hidden_states = False , return_dict = True ):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state , hidden_states=hidden_states , )
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = 'resnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True

    def _init_weights( self , module ):
        if isinstance(module , nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" )
        elif isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )

    def _set_gradient_checkpointing( self , module , value=False ):
        if isinstance(module , ResNetEncoder ):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
RESNET_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    'The bare ResNet model outputting raw features without any specific head on top.' , RESNET_START_DOCSTRING , )
class ResNetModel(ResNetPreTrainedModel):
    def __init__( self , config ):
        super().__init__(config )
        self.config = config
        self.embedder = ResNetEmbeddings(config )
        self.encoder = ResNetEncoder(config )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values , output_hidden_states = None , return_dict = None ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    '\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ' , RESNET_START_DOCSTRING , )
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config )
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values = None , labels = None , output_hidden_states = None , return_dict = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
@add_start_docstrings(
    '\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    ' , RESNET_START_DOCSTRING , )
class ResNetBackbone(ResNetPreTrainedModel , BackboneMixin ):
    def __init__( self , config ):
        super().__init__(config )
        super()._init_backbone(config )
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config )
        self.encoder = ResNetEncoder(config )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @replace_return_docstrings(output_type=BackboneOutput , config_class=_CONFIG_FOR_DOC )
    def forward( self , pixel_values , output_hidden_states = None , return_dict = None ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values )
        outputs = self.encoder(embedding_output , output_hidden_states=True , return_dict=True )
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names ):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=None , )
| 263 | 0 |
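# Stand-alone sketch of the residual pattern ResNetBasicLayer implements:
# out = act(f(x) + shortcut(x)), with a 1x1 conv shortcut when shapes change.
# Names and sizes here are illustrative, not part of the model above.
import torch
from torch import nn

class TinyResidualBlock(nn.Module):
    def __init__(self, in_ch, out_ch, stride=1):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(),
            nn.Conv2d(out_ch, out_ch, 3, padding=1, bias=False),
            nn.BatchNorm2d(out_ch),
        )
        # 1x1 projection only when the residual cannot be added elementwise
        self.shortcut = (
            nn.Sequential(nn.Conv2d(in_ch, out_ch, 1, stride=stride, bias=False), nn.BatchNorm2d(out_ch))
            if (in_ch != out_ch or stride != 1)
            else nn.Identity()
        )
        self.act = nn.ReLU()

    def forward(self, x):
        return self.act(self.body(x) + self.shortcut(x))

print(TinyResidualBlock(64, 128, stride=2)(torch.randn(1, 64, 56, 56)).shape)  # (1, 128, 28, 28)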
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names )}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types] )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14 )
    restype_atom37_to_atom14_list.append([0] * 37 )
    restype_atom14_mask_list.append([0.0] * 14 )

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list , dtype=torch.int32 , device=protein['aatype'].device , )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list , dtype=torch.int32 , device=protein['aatype'].device , )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list , dtype=torch.float32 , device=protein['aatype'].device , )
    protein_aatype = protein['aatype'].to(torch.long )

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein['atom14_atom_exists'] = residx_atom14_mask
    protein['residx_atom14_to_atom37'] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein['residx_atom37_to_atom14'] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37] , dtype=torch.float32 , device=protein['aatype'].device )
    for restype, restype_letter in enumerate(rc.restypes ):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein['atom37_atom_exists'] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n , device=batch['aatype'].device ) , batch , np.ndarray )
    out = tensor_tree_map(lambda t: np.array(t ) , make_atom14_masks(batch ) )
    return out
| 222 |
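import torch

# Toy demo of the gather-style index maps built above (illustrative sizes, not
# the real 14/37 atom layouts): an integer table picks per-residue columns out
# of a "sparse" representation to form the "dense" one.
index_map = torch.tensor([[0, 3, 1], [2, 0, 0]])               # (num_res, dense) -> sparse slots
sparse = torch.arange(2 * 5, dtype=torch.float).reshape(2, 5)  # (num_res, sparse)
dense = torch.gather(sparse, 1, index_map)                     # per-residue column lookup
print(dense)  # tensor([[0., 3., 1.], [7., 5., 5.]])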
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )

    def test_convert_token_and_id( self ):
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(len(vocab_keys ) , 10_08 )

    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 10_08 )

    def test_full_tokenizer( self ):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )

        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
    def big_tokenizer( self ):
return XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
    def test_picklable_without_disk( self ):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XGLMTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )

        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
    def test_tokenization_base_easy_symbols( self ):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [2, 3_12_27, 44_47, 35]

        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self ):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
        )
# fmt: off
        original_tokenizer_encodings = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenizer_integration( self ):
        # fmt: off
        expected_encoding = {
'input_ids': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='facebook/xglm-564M' , padding=False , )
| 222 | 1 |
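# Sketch of the fairseq-offset convention the assertions above rely on: raw
# sentencepiece ids are shifted so that control tokens occupy the first vocab
# slots, hence `value + tokenizer.fairseq_offset`. The offset value below is
# illustrative and model-specific.
fairseq_offset = 1
sp_ids = [2_85, 46, 10, 1_70, 3_82]  # raw sentencepiece piece ids from the test
model_ids = [i + fairseq_offset for i in sp_ids]
print(model_ids)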
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_lengths=True , use_token_type_ids=True , use_labels=True , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=2 , vocab_size=99 , n_special=0 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=2 , num_choices=4 , summary_type="last" , use_proj=True , scope=None , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 ).float()
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config( self ):
        return XLMConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model(
        self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = XLMModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , lengths=input_lengths , langs=token_type_ids )
        result = model(input_ids , langs=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
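    # Illustrative note (not part of the original test): `prepare_config_and_inputs_for_common`
    # is the hook consumed by the common model-test mixin; a shared test roughly does
    #
    #   config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    #   model = model_class(config)
    #   outputs = model(**self._prepare_for_class(inputs_dict, model_class))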
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )
    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
@slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 80 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
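# Illustrative example (not in the original file) of the default mask built above:
# with config.pad_token_id == 1 and input_ids == [[5, 7, 1]], the resulting
# attention_mask is [[1, 1, 0]] (pad positions are masked out).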
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 254 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
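    # Illustrative usage (not part of the original file):
    #
    #   config = GPTSanJapaneseConfig(num_switch_layers=10, num_ext_layers=0)
    #   assert config.num_layers == 10  # derived above as switch + ext layer counts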
| 700 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
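# For reference (illustrative): get_test_dummy_examples() yields
#   [(0, {"content": "foo"}), (1, {"content": "bar"}), (2, {"content": "foobar"})]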
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)
    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
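    # Note (illustrative): Beam-based builders require an explicit runner, e.g.
    #   DummyBeamDataset(cache_dir=".cache", beam_runner="DirectRunner").download_and_prepare()
    # `test_no_beam_options` above asserts the error raised when the runner is missing.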
| 258 | 0 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
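# Example (illustrative): accuracy(np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([1, 0])) == 2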
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
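# Illustrative row layout assumed above (Story Cloze CSV): columns 1-4 hold the four
# story sentences, columns 5-6 the two candidate endings, and the last column the
# 1-based index of the correct ending (hence the `- 1`).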
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets of tuples (story, 1st continuation, 2nd continuation, label)
    into model input tensors of shape (n_batch, 2, input_len)."""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
parser.add_argument("""--model_name""" , type=A_ , default="""openai-gpt""" , help="""pretrained model name""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""" )
parser.add_argument(
"""--output_dir""" , default=A_ , type=A_ , required=A_ , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument("""--train_dataset""" , type=A_ , default="""""" )
parser.add_argument("""--eval_dataset""" , type=A_ , default="""""" )
parser.add_argument("""--seed""" , type=A_ , default=42 )
parser.add_argument("""--num_train_epochs""" , type=A_ , default=3 )
parser.add_argument("""--train_batch_size""" , type=A_ , default=8 )
parser.add_argument("""--eval_batch_size""" , type=A_ , default=16 )
parser.add_argument("""--adam_epsilon""" , default=1e-8 , type=A_ , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , type=A_ , default=1 )
parser.add_argument(
"""--max_steps""" , default=-1 , type=A_ , help=(
"""If > 0: set total number of training steps to perform. Override num_train_epochs."""
) , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=A_ , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--learning_rate""" , type=A_ , default=6.25e-5 )
parser.add_argument("""--warmup_steps""" , default=0 , type=A_ , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--lr_schedule""" , type=A_ , default="""warmup_linear""" )
parser.add_argument("""--weight_decay""" , type=A_ , default=0.0_1 )
parser.add_argument("""--lm_coef""" , type=A_ , default=0.9 )
parser.add_argument("""--n_valid""" , type=A_ , default=374 )
parser.add_argument("""--server_ip""" , type=A_ , default="""""" , help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""" , type=A_ , default="""""" , help="""Can be used for distant debugging.""" )
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info("""Encoding dataset...""" )
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
| 68 |
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
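# Example usage (illustrative):
#   base16_encode(b"Hello") == "48656C6C6F"
#   base16_decode("48656C6C6F") == b"Hello"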
if __name__ == "__main__":
import doctest
doctest.testmod()
| 464 | 0 |
'''simple docstring'''
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
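# Example usage (illustrative): drop references and clear the accelerator cache.
#   a = torch.ones(1000, 1000, device="cuda"); b = torch.ones(1000, 1000, device="cuda")
#   a, b = release_memory(a, b)  # both are now None and the device cache is emptied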
def should_reduce_batch_size(exception):
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
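# Illustrative usage of `find_executable_batch_size` (defined below): the wrapped
# function is retried with a halved batch size whenever an OOM-style error is raised.
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # build dataloaders and train at `batch_size`
#
#   train()  # called with no argument; the decorator injects the batch size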
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
return decorator | 257 | '''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
"markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule" )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser | 257 | 1 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's an important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (despite we don't have training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'''config.{attribute}''' in modeling_source
or f'''getattr(config, "{attribute}"''' in modeling_source
or f'''getattr(self.config, "{attribute}"''' in modeling_source
):
                attribute_used = True
# Deal with multi-line cases
            elif (
                re.search(
                    rf'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
                    attribute_used = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True

            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
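# Illustrative example (not in the original script): for a config whose modeling file
# contains the literal text `config.hidden_size`, the attribute "hidden_size" counts
# as used above and is not reported.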
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
| 47 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }
    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 436 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 48 |
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
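# Hedged sanity check (assuming the functions above are in scope): sort a small
# fixed list and verify the result; the comparison count varies with the random pivot.
data = [5, 3, 8, 1, 9, 2]
comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
assert data == [1, 2, 3, 5, 8, 9]
print(f"sorted small demo list with {comparisons} comparisons")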
| 48 | 1 |
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1_000, 1_000) for i in range(10)]
    r = randint(-5_000, 5_000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
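    # Hedged worked example (assuming triplet_sum2 above): find a triplet summing to 22.
    print(triplet_sum2([1, 4, 45, 6, 10, 8], 22))  # -> (4, 8, 10)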
| 391 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on
class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: No prefix and suffix=[eos, src_lang_code].
        - In default mode: Prefix=[src_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: No prefix and suffix=[eos, tgt_lang_code].
        - In default mode: Prefix=[tgt_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
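# Hedged usage sketch: loading the real SentencePiece vocab requires network access;
# the checkpoint name comes from the pretrained map above.
if __name__ == "__main__":
    tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
    print(tokenizer("Hello world").input_ids)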
| 391 | 1 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 112 |
"""simple docstring"""
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
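# Hedged self-checks for is_balanced (illustrative additions; they run on import):
assert is_balanced("{[()]}")
assert not is_balanced("([)]")
assert not is_balanced("((")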
| 112 | 1 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
        ('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
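# Local sanity check (hedged; assumes `transformers` is installed and this module is
# importable): resolve a mapped class name from the table above without any download.
if __name__ == "__main__":
    print(image_processor_class_from_name("ViTImageProcessor"))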
| 51 |
'''simple docstring'''
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))  # raises ValueError: there is no path to "Foo"
| 51 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
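# For reference, a simplified standalone sketch of the same deferred-import idea using
# PEP 562 module-level __getattr__. This is an illustrative stand-in for `_LazyModule`,
# not the actual transformers implementation; save it as its own module to try it out.
#
#     # lazy_demo.py (hypothetical file name)
#     import importlib
#
#     _SUBMODULES = {"json_tools": "json"}  # exported name -> real module (illustrative)
#
#     def __getattr__(name):
#         if name in _SUBMODULES:
#             return importlib.import_module(_SUBMODULES[name])
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")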
| 623 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
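# Hedged demo of the floats_list helper defined above (illustrative addition; the
# tests themselves are normally run via pytest rather than this guard):
if __name__ == "__main__":
    sample = floats_list((2, 3))
    print(len(sample), len(sample[0]))  # -> 2 3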
| 623 | 1 |
"""simple docstring"""
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """
    image: is a grayscale PIL image object
    """
    width, height = image.size
    mean = 0
    pixels = image.load()

    for x in range(width):
        for y in range(height):
            mean += pixels[x, y]
    mean //= width * height

    for x in range(width):
        for y in range(height):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
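# Illustrative self-check that needs no input file (hedged addition; the paths above
# are placeholders): threshold a synthetic 8x8 gradient; every surviving pixel value
# should be either 0 or 255.
_demo = Image.new("L", (8, 8))
_demo.putdata(list(range(0, 256, 4)))
assert sorted(set(mean_threshold(_demo).getdata())) == [0, 255]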
| 573 |
"""simple docstring"""
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        raise ValueError("No solution. (Inconsistent system)")
    if determinant_x == determinant_y == 0:
        # Trivial solution: both x and y are zero (consistent system)
        return (0.0, 0.0)
    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-trivial solution (consistent system)
    return (x, y)
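# Worked example (assuming cramers_rule_2x2 above): solve
#   2x + 3y = 7
#    x -  y = 1
# det = 2*(-1) - 1*3 = -5, det_x = 7*(-1) - 1*3 = -10, det_y = 2*1 - 1*7 = -5,
# so x = -10/-5 = 2.0 and y = -5/-5 = 1.0.
assert cramers_rule_2x2([2, 3, 7], [1, -1, 1]) == (2.0, 1.0)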
| 573 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__A = logging.get_logger(__name__)
def lowercase_ ( _lowerCamelCase: str , _lowerCamelCase: List[str] ) -> Any:
'''simple docstring'''
__lowerCamelCase : Dict = b.T
__lowerCamelCase : str = np.sum(np.square(_lowerCamelCase ) , axis=1 )
__lowerCamelCase : Any = np.sum(np.square(_lowerCamelCase ) , axis=0 )
__lowerCamelCase : str = np.matmul(_lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : str = aa[:, None] - 2 * ab + ba[None, :]
return d
def lowercase_ ( _lowerCamelCase: Any , _lowerCamelCase: str ) -> Tuple:
'''simple docstring'''
__lowerCamelCase : int = x.reshape(-1 , 3 )
__lowerCamelCase : Any = squared_euclidean_distance(_lowerCamelCase , _lowerCamelCase )
return np.argmin(_lowerCamelCase , axis=1 )
class _snake_case ( a__ ):
snake_case__ = ["pixel_values"]
def __init__( self : Any , UpperCAmelCase : Optional[Union[List[List[int]], np.ndarray]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : bool = True , **UpperCAmelCase : Dict , ):
super().__init__(**UpperCAmelCase )
__lowerCamelCase : Any = size if size is not None else {"height": 256, "width": 256}
__lowerCamelCase : List[str] = get_size_dict(UpperCAmelCase )
__lowerCamelCase : Optional[Any] = np.array(UpperCAmelCase ) if clusters is not None else None
__lowerCamelCase : int = do_resize
__lowerCamelCase : List[str] = size
__lowerCamelCase : List[str] = resample
__lowerCamelCase : List[str] = do_normalize
__lowerCamelCase : str = do_color_quantize
def lowerCamelCase__ ( self : str , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Union[str, Any] , ):
__lowerCamelCase : Optional[int] = get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size dictionary must contain both height and width keys. Got {size.keys()}""" )
return resize(
UpperCAmelCase , size=(size["height"], size["width"]) , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def lowerCamelCase__ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , ):
__lowerCamelCase : List[str] = rescale(image=UpperCAmelCase , scale=1 / 1_2_7.5 , data_format=UpperCAmelCase )
__lowerCamelCase : Any = image - 1
return image
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_normalize = None , do_color_quantize = None , clusters = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters ) if clusters is not None else None
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image , ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images )
            images = color_quantize(images , clusters ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size , -1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images )
        else:
            images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"input_ids": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
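# --- illustrative sketch, not part of the original module ---
# `preprocess` above delegates to a `color_quantize` helper that is imported
# elsewhere and not shown here. The sketch below is an assumption about its
# behavior inferred from the call site (pixels of shape (..., 3) mapped to the
# index of the nearest cluster center), not the canonical implementation.
def _color_quantize_sketch(images, clusters):
    import numpy as np

    pixels = images.reshape(-1, 3).astype(np.float32)  # flatten to (n_pixels, 3)
    # Squared Euclidean distance from every pixel to every cluster center.
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(axis=-1)
    return np.argmin(distances, axis=-1)  # (n_pixels,) nearest-cluster indices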
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)
FILTER_TINY_SCALE = 1 / 100  # if height or width lower than this scale, drop it.
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    '''simple docstring'''
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    '''simple docstring'''
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(all_img_list: list, all_annos: list, idxs: list[int], output_size: tuple[int, int], scale_range: tuple[float, float], filter_scale: float = 0.0, ) -> tuple[list, list, str]:
    '''simple docstring'''
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
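# --- illustrative sketch, not part of the original script ---
# In the right-hand quadrants above, a normalized coordinate x in [0, 1] is
# remapped with x' = scale_x + x * (1 - scale_x), which squeezes the source
# image's coordinates into [scale_x, 1] on the mosaic canvas:
def _quadrant_remap_demo(scale_x: float = 0.5) -> None:
    def remap(x: float) -> float:
        return scale_x + x * (1 - scale_x)

    assert abs(remap(0.0) - scale_x) < 1e-12  # source left edge lands on the seam
    assert abs(remap(1.0) - 1.0) < 1e-12  # source right edge stays on the canvas border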
def random_chars(number_char: int) -> str:
    '''simple docstring'''
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
    print('''DONE ✅''')
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed):
    """simple docstring"""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
# ^^ safe to call this function even if cuda is not available
class EMAModel :
'''simple docstring'''
def __init__(self , A , A = 0.9999 , A = 0.0 , A = 0 , A = False , A = 1.0 , A = 2 / 3 , A = None , A = None , **A , ) -> List[str]:
"""simple docstring"""
if isinstance(A , torch.nn.Module ):
_a = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , A , standard_warn=A , )
_a = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_a = True
if kwargs.get('''max_value''' , A ) is not None:
_a = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate('''max_value''' , '''1.0.0''' , A , standard_warn=A )
_a = kwargs['''max_value''']
if kwargs.get('''min_value''' , A ) is not None:
_a = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate('''min_value''' , '''1.0.0''' , A , standard_warn=A )
_a = kwargs['''min_value''']
_a = list(A )
_a = [p.clone().detach() for p in parameters]
if kwargs.get('''device''' , A ) is not None:
_a = '''The `device` argument is deprecated. Please use `to` instead.'''
deprecate('''device''' , '''1.0.0''' , A , standard_warn=A )
self.to(device=kwargs['''device'''] )
_a = None
_a = decay
_a = min_decay
_a = update_after_step
_a = use_ema_warmup
_a = inv_gamma
_a = power
_a = 0
_a = None # set in `step()`
_a = model_cls
_a = model_config
@classmethod
def a__ (cls , A , A ) -> "EMAModel":
"""simple docstring"""
_a , _a = model_cls.load_config(A , return_unused_kwargs=A )
_a = model_cls.from_pretrained(A )
_a = cls(model.parameters() , model_cls=A , model_config=model.config )
ema_model.load_state_dict(A )
return ema_model
def a__ (self , A ) -> Tuple:
"""simple docstring"""
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
_a = self.model_cls.from_config(self.model_config )
_a = self.state_dict()
state_dict.pop('''shadow_params''' , A )
model.register_to_config(**A )
self.copy_to(model.parameters() )
model.save_pretrained(A )
def a__ (self , A ) -> float:
"""simple docstring"""
_a = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_a = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_a = (1 + step) / (10 + step)
_a = min(A , self.decay )
# make sure decay is not smaller than min_decay
_a = max(A , self.min_decay )
return cur_decay_value
@torch.no_grad()
def a__ (self , A ) -> Optional[Any]:
"""simple docstring"""
if isinstance(A , torch.nn.Module ):
_a = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , A , standard_warn=A , )
_a = parameters.parameters()
_a = list(A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_a = self.get_decay(self.optimization_step )
_a = decay
_a = 1 - decay
_a = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , A ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
_a = deepspeed.zero.GatheredParameters(A , modifier_rank=A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(A )
def a__ (self , A ) -> None:
"""simple docstring"""
_a = list(A )
for s_param, param in zip(self.shadow_params , A ):
param.data.copy_(s_param.to(param.device ).data )
def a__ (self , A=None , A=None ) -> None:
"""simple docstring"""
_a = [
p.to(device=A , dtype=A ) if p.is_floating_point() else p.to(device=A )
for p in self.shadow_params
]
def a__ (self ) -> dict:
"""simple docstring"""
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def a__ (self , A ) -> None:
"""simple docstring"""
_a = [param.detach().cpu().clone() for param in parameters]
def a__ (self , A ) -> None:
"""simple docstring"""
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params , A ):
param.data.copy_(c_param.data )
# Better memory-wise.
_a = None
def a__ (self , A ) -> None:
"""simple docstring"""
_a = copy.deepcopy(A )
_a = state_dict.get('''decay''' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
_a = state_dict.get('''min_decay''' , self.min_decay )
if not isinstance(self.min_decay , A ):
raise ValueError('''Invalid min_decay''' )
_a = state_dict.get('''optimization_step''' , self.optimization_step )
if not isinstance(self.optimization_step , A ):
raise ValueError('''Invalid optimization_step''' )
_a = state_dict.get('''update_after_step''' , self.update_after_step )
if not isinstance(self.update_after_step , A ):
raise ValueError('''Invalid update_after_step''' )
_a = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , A ):
raise ValueError('''Invalid use_ema_warmup''' )
_a = state_dict.get('''inv_gamma''' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('''Invalid inv_gamma''' )
_a = state_dict.get('''power''' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('''Invalid power''' )
_a = state_dict.get('''shadow_params''' , A )
if shadow_params is not None:
_a = shadow_params
if not isinstance(self.shadow_params , A ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(A , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('''shadow_params must all be Tensors''' )
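# --- illustrative sketch, not part of the original module ---
# The core update in `step` above is the standard EMA recurrence
# shadow <- decay * shadow + (1 - decay) * param, applied in place as
# s.sub_((1 - decay) * (s - p)). A small numeric check of that equivalence:
def _ema_update_demo():
    import torch

    decay = 0.99
    param = torch.tensor([1.0, 2.0])
    shadow = torch.zeros(2)
    shadow.sub_((1 - decay) * (shadow - param))  # in-place EMA step
    assert torch.allclose(shadow, decay * torch.zeros(2) + (1 - decay) * param)
    return shadow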
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    '''simple docstring'''

    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self , group: int = 14 ) -> None:
        if group not in primes:
            raise ValueError('''Unsupported Group''' )
        self.prime = primes[group]['''prime''']
        self.generator = primes[group]['''generator''']
        self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )

    def get_private_key(self ) -> str:
        return hex(self.__private_key )[2:]

    def generate_public_key(self ) -> str:
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]

    def is_valid_public_key(self , key: int ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )

    def generate_shared_key(self , other_key_str: str ) -> str:
        other_key = int(other_key_str , base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int , prime: int ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str , (prime - 1) // 2 , prime ) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str , remote_public_key_str: str , group: int = 14 ) -> str:
        local_private_key = int(local_private_key_str , base=16 )
        remote_public_key = int(remote_public_key_str , base=16 )
        prime = primes[group]['''prime''']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
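# --- example usage, not part of the original module ---
# Two parties using the DiffieHellman class above derive the same shared
# secret from each other's public keys:
def _diffie_hellman_demo() -> str:
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    alice_shared = alice.generate_shared_key(bob.generate_public_key())
    bob_shared = bob.generate_shared_key(alice.generate_public_key())
    assert alice_shared == bob_shared  # both sides hash the same g^(ab) mod p
    return alice_shared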
from functools import lru_cache
@lru_cache
def factorial(num: int ) -> int:
    if num < 0:
        raise ValueError("""Number should not be negative.""" )
    return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
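# --- example usage, not part of the original module ---
# Because `factorial` is wrapped in lru_cache, repeated calls reuse memoized
# subresults; cache_info() exposes the hit/miss counters of the cache.
def _factorial_demo():
    assert factorial(5) == 120
    assert factorial(10) == 3_628_800  # only levels 6..10 are newly computed
    return factorial.cache_info()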
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'''{len(upper_files)} files contain uppercase characters:''')
print('\n'.join(upper_files) + '\n')
space_files = [file for file in filepaths if ' ' in file]
if space_files:
print(f'''{len(space_files)} files contain space characters:''')
print('\n'.join(space_files) + '\n')
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f'''{len(hyphen_files)} files contain hyphen characters:''')
print('\n'.join(hyphen_files) + '\n')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'''{len(nodir_files)} files are not in a directory:''')
print('\n'.join(nodir_files) + '\n')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
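# --- illustrative sketch, not part of the original script ---
# `good_file_paths` is imported from build_directory_md and not shown here.
# A rough sketch of what such a generator could look like (an assumption for
# illustration, not the real implementation): walk the tree, skip hidden and
# virtualenv directories, and yield Python/notebook files.
def _good_file_paths_sketch(top: str = "."):
    for dir_path, dir_names, filenames in os.walk(top):
        dir_names[:] = [d for d in dir_names if d[0] not in "._" and d != "venv"]
        for filename in filenames:
            if filename.endswith((".py", ".ipynb")):
                yield os.path.join(dir_path, filename)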
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=True , )
        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FlaxDistilBertModelTester(self )

    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""distilbert-base-uncased""" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxDistilBertModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_no_head( self ):
        '''simple docstring'''
        model = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
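# --- illustrative sketch, not part of the original test file ---
# `ids_tensor` and `random_attention_mask` come from the shared Flax test
# utilities. A rough sketch of their behavior (an approximation, not the
# canonical helpers): random token ids, and a 0/1 mask whose last position is
# forced to 1 so every row attends to at least one token.
def _ids_tensor_sketch(shape, vocab_size):
    import numpy as np

    return np.random.randint(0, vocab_size, size=shape, dtype=np.int32)

def _random_attention_mask_sketch(shape):
    import numpy as np

    mask = np.random.randint(0, 2, size=shape, dtype=np.int32)
    mask[:, -1] = 1  # guarantee at least one attended token per row
    return mask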
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x ):
    """Calculate the entropy of a pre-softmax logit tensor."""
    exp_x = torch.exp(x )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(x * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
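# --- numeric sanity check, not part of the original module ---
# entropy(x) = log(sum_i exp(x_i)) - (sum_i x_i*exp(x_i)) / (sum_i exp(x_i)) is
# exactly the Shannon entropy of softmax(x): with p_i = exp(x_i)/A,
# H = -sum_i p_i*(x_i - log A) = log A - B/A.
def _entropy_check():
    x = torch.randn(2, 5)
    p = torch.softmax(x, dim=1)
    reference = -(p * torch.log(p)).sum(dim=1)
    assert torch.allclose(entropy(x), reference, atol=1e-5)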
class DeeBertEncoder( nn.Module ):
'''simple docstring'''
    def __init__( self , config ):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config ) for _ in range(config.num_hidden_layers )] )
        self.highway = nn.ModuleList([BertHighway(config ) for _ in range(config.num_hidden_layers )] )
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers )]
    def set_early_exit_entropy( self , x ):
        if (type(x ) is float) or (type(x ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
    def init_highway_pooler( self , pooler ):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )
def A__ ( self , snake_case_ , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , ) -> List[str]:
__lowerCAmelCase = ()
__lowerCAmelCase = ()
__lowerCAmelCase = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
__lowerCAmelCase = all_hidden_states + (hidden_states,)
__lowerCAmelCase = layer_module(
snake_case_ , snake_case_ , head_mask[i] , snake_case_ , snake_case_ )
__lowerCAmelCase = layer_outputs[0]
if self.output_attentions:
__lowerCAmelCase = all_attentions + (layer_outputs[1],)
__lowerCAmelCase = (hidden_states,)
if self.output_hidden_states:
__lowerCAmelCase = current_outputs + (all_hidden_states,)
if self.output_attentions:
__lowerCAmelCase = current_outputs + (all_attentions,)
__lowerCAmelCase = self.highway[i](snake_case_ )
# logits, pooled_output
if not self.training:
__lowerCAmelCase = highway_exit[0]
__lowerCAmelCase = entropy(snake_case_ )
__lowerCAmelCase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__lowerCAmelCase = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__lowerCAmelCase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(snake_case_ , i + 1 )
else:
__lowerCAmelCase = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__lowerCAmelCase = all_hidden_states + (hidden_states,)
__lowerCAmelCase = (hidden_states,)
if self.output_hidden_states:
__lowerCAmelCase = outputs + (all_hidden_states,)
if self.output_attentions:
__lowerCAmelCase = outputs + (all_attentions,)
__lowerCAmelCase = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    '''The Bert Model transformer with early exiting (DeeBERT). ''' , BERT_START_DOCSTRING , )
class DeeBertModel( BertPreTrainedModel ):
'''simple docstring'''
    def __init__( self , config ):
        super().__init__(config )
        self.config = config
        self.embeddings = BertEmbeddings(config )
        self.encoder = DeeBertEncoder(config )
        self.pooler = BertPooler(config )
        self.init_weights()
    def init_highway_pooler( self ):
        self.encoder.init_highway_pooler(self.pooler )

    def get_input_embeddings( self ):
        return self.embeddings.word_embeddings

    def set_input_embeddings( self , value ):
        self.embeddings.word_embeddings = value

    def _prune_heads( self , heads_to_prune ):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
def A__ ( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , ) -> Optional[Any]:
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
__lowerCAmelCase = input_ids.size()
elif inputs_embeds is not None:
__lowerCAmelCase = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
__lowerCAmelCase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__lowerCAmelCase = torch.ones(snake_case_ , device=snake_case_ )
if encoder_attention_mask is None:
__lowerCAmelCase = torch.ones(snake_case_ , device=snake_case_ )
if token_type_ids is None:
__lowerCAmelCase = torch.zeros(snake_case_ , dtype=torch.long , device=snake_case_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__lowerCAmelCase = self.get_extended_attention_mask(snake_case_ , snake_case_ , snake_case_ )
        # If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__lowerCAmelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__lowerCAmelCase = encoder_attention_mask[:, None, None, :]
__lowerCAmelCase = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
__lowerCAmelCase = (1.0 - encoder_extended_attention_mask) * -10_000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__lowerCAmelCase = self.get_head_mask(snake_case_ , self.config.num_hidden_layers )
__lowerCAmelCase = self.embeddings(
input_ids=snake_case_ , position_ids=snake_case_ , token_type_ids=snake_case_ , inputs_embeds=snake_case_ )
__lowerCAmelCase = self.encoder(
snake_case_ , attention_mask=snake_case_ , head_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , )
__lowerCAmelCase = encoder_outputs[0]
__lowerCAmelCase = self.pooler(snake_case_ )
__lowerCAmelCase = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException( Exception ):
'''simple docstring'''
    def __init__( self , message , exit_layer ):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway( nn.Module ):
'''simple docstring'''
    def __init__( self , config ):
        super().__init__()
        self.pooler = BertPooler(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , config.num_labels )
    def forward( self , encoder_outputs ):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input )
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output )
        logits = self.classifier(pooled_output )
        return logits, pooled_output
@add_start_docstrings(
    '''Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. ''' , BERT_START_DOCSTRING , )
class DeeBertForSequenceClassification( BertPreTrainedModel ):
'''simple docstring'''
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.bert = DeeBertModel(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , self.config.num_labels )
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
def A__ ( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=-1 , snake_case_=False , ) -> Optional[int]:
__lowerCAmelCase = self.num_layers
try:
__lowerCAmelCase = self.bert(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , position_ids=snake_case_ , head_mask=snake_case_ , inputs_embeds=snake_case_ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__lowerCAmelCase = outputs[1]
__lowerCAmelCase = self.dropout(snake_case_ )
__lowerCAmelCase = self.classifier(snake_case_ )
__lowerCAmelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__lowerCAmelCase = e.message
__lowerCAmelCase = e.exit_layer
__lowerCAmelCase = outputs[0]
if not self.training:
__lowerCAmelCase = entropy(snake_case_ )
__lowerCAmelCase = []
__lowerCAmelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__lowerCAmelCase = MSELoss()
__lowerCAmelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__lowerCAmelCase = CrossEntropyLoss()
__lowerCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__lowerCAmelCase = []
for highway_exit in outputs[-1]:
__lowerCAmelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(snake_case_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__lowerCAmelCase = MSELoss()
__lowerCAmelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__lowerCAmelCase = CrossEntropyLoss()
__lowerCAmelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(snake_case_ )
if train_highway:
__lowerCAmelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__lowerCAmelCase = (loss,) + outputs
if not self.training:
__lowerCAmelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__lowerCAmelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
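# --- illustrative sketch, not part of the original module ---
# The early-exit rule DeeBERT applies per layer, extracted into a standalone
# helper: stop at the first highway classifier whose output entropy falls
# below that layer's threshold (shown here for a single example, batch = 1).
def _early_exit_demo(per_layer_logits, thresholds):
    for layer_idx, logits in enumerate(per_layer_logits):
        if entropy(logits).item() < thresholds[layer_idx]:
            return layer_idx, logits  # confident enough: exit at this layer
    return len(per_layer_logits) - 1, per_layer_logits[-1]  # fall through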
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    '''simple docstring'''

    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self , group = 14 ) -> None:
        if group not in primes:
            raise ValueError('Unsupported Group' )
        self.prime = primes[group]['prime']
        self.generator = primes[group]['generator']
        self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )

    def get_private_key(self ) -> str:
        return hex(self.__private_key )[2:]

    def generate_public_key(self ) -> str:
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]

    def is_valid_public_key(self , key ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )

    def generate_shared_key(self , other_key_str ) -> str:
        other_key = int(other_key_str , base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError('Invalid public key' )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str , prime ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str , (prime - 1) // 2 , prime ) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str , remote_public_key_str , group = 14 ) -> str:
        local_private_key = int(local_private_key_str , base=16 )
        remote_public_key = int(remote_public_key_str , base=16 )
        prime = primes[group]['prime']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError('Invalid public key' )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
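# --- example usage, not part of the original module ---
# The static helpers above derive a shared key directly from one party's
# private key and the other party's public key:
def _static_shared_key_demo() -> str:
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    shared = DiffieHellman.generate_shared_key_static(
        alice.get_private_key(), bob.generate_public_key(), group=14
    )
    assert shared == bob.generate_shared_key(alice.generate_public_key())
    return shared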
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast( PreTrainedTokenizerFast ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file=None , tokenizer_file=None , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=103 , **kwargs , ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens , list ):
                raise TypeError(
                    F"""additional_special_tokens should be of type {type(list )}, but is"""
                    F""" {type(additional_special_tokens )}""" )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                F"""<unk_{i}>""" for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 , self.offset )]
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , pad_token=pad_token , eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask( self , seq ):
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
                F""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
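# --- illustrative sketch, not part of the original module ---
# The __init__ above pads additional_special_tokens with "<unk_2>" ...
# "<unk_{offset-1}>" so that exactly `offset` ids (default 103) precede the
# regular vocabulary. A self-contained sketch of that filling step:
def _pegasus_offset_tokens_demo(mask_token_sent="<mask_1>", offset=103):
    additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
    additional_special_tokens += [f"<unk_{i}>" for i in range(2, offset)]
    # mask_token_sent plus <unk_2>..<unk_102> fill ids 2..102 of the offset.
    assert len(additional_special_tokens) == offset - 1
    return additional_special_tokens[:3]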
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    XL_PREFIX = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self, *__a, **__a):
'''simple docstring'''
super().__init__(*__a, **__a)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING)
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_lowerCAmelCase : List[Any] = None
if self.model.config.prefix is not None:
_lowerCAmelCase : Any = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_lowerCAmelCase : Optional[int] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self._sanitize_parameters(prefix=__a, **self._forward_params)
_lowerCAmelCase : Dict = {**self._preprocess_params, **preprocess_params}
_lowerCAmelCase : List[Any] = {**self._forward_params, **forward_params}
    def _sanitize_parameters(self, return_full_text=None, return_tensors=None, return_text=None, return_type=None,
                             clean_up_tokenization_spaces=None, prefix=None, handle_long_generation=None,
                             stop_sequence=None, **generate_kwargs):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework)
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']")
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments; Transfo-XL needs an extra space before punctuation symbols.
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework)
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length")

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, ))

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)
        return records
| 500 |
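A usage sketch for the text-generation pipeline defined above, assuming `transformers` with a PyTorch backend and the public `gpt2` checkpoint:

from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
# do_sample=True is required when asking for more than one return sequence
# without beam search.
outputs = generator("Hello, I'm a language model,", max_new_tokens=20, do_sample=True, num_return_sequences=2)
for out in outputs:
    print(out["generated_text"])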
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = 'gptj'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(self, vocab_size=50400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64,
                 n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0,
                 layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256,
                 eos_token_id=50256, tie_word_embeddings=False, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config, task="default", patching_specs=None, use_past=False, ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self):
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_layers(self):
        return self._config.n_layer

    @property
    def num_attention_heads(self):
        return self._config.n_head
    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ):
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs
    @property
    def default_onnx_opset(self):
        return 13
| 500 | 1 |
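A small sketch showing how the GPT-J config and its attribute_map behave; assumes `transformers` with PyTorch installed, and the tiny sizes are illustrative:

from transformers import GPTJConfig, GPTJModel

config = GPTJConfig(n_layer=2, n_head=4, n_embd=128, rotary_dim=16)  # deliberately tiny
model = GPTJModel(config)
# attribute_map resolves hidden_size -> n_embd, num_hidden_layers -> n_layer, etc.
print(config.hidden_size, config.num_hidden_layers)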
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_lowercase = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != 'container'}
            yy = {k: v for k, v in vars(y).items() if k != 'container'}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('choices', None) and yy.get('choices', None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['type'](expected_choice), yy['type'](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument('--foo', type=int, required=True)
        expected.add_argument('--bar', type=float, required=True)
        expected.add_argument('--baz', type=str, required=True)
        expected.add_argument('--flag', type=string_to_bool, default=False, const=True, nargs='?')
        self.argparsersEqual(parser, expected)

        args = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument('--foo', default=42, type=int)
        expected.add_argument('--baz', default='toto', type=str, help='help message')
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo', type=string_to_bool, default=False, const=True, nargs='?')
        expected.add_argument('--baz', type=string_to_bool, default=True, const=True, nargs='?')
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('--no_baz', action='store_false', default=False, dest='baz')
        expected.add_argument('--opt', type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(['--foo', '--no_baz'])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(['--foo', '--baz'])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            '--foo', default='toto', choices=['titi', 'toto', 42], type=make_choice_type_function(['titi', 'toto', 42]), )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, 'toto')
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(['--foo', 'titi'])
        self.assertEqual(args.foo, 'titi')
        enum_ex = parser.parse_args_into_dataclasses(['--foo', 'titi'])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(['--foo', '42'])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(['--foo', '42'])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            '--foo', default='toto', choices=('titi', 'toto', 42), type=make_choice_type_function(['titi', 'toto', 42]), )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, 'toto')

        args = parser.parse_args(['--foo', 'titi'])
        self.assertEqual(args.foo, 'titi')

        args = parser.parse_args(['--foo', '42'])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument('--foo_int', nargs='+', default=[], type=int)
        expected.add_argument('--bar_int', nargs='+', default=[1, 2, 3], type=int)
        expected.add_argument('--foo_str', nargs='+', default=['Hallo', 'Bonjour', 'Hello'], type=str)
        expected.add_argument('--foo_float', nargs='+', default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args, Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=['Hallo', 'Bonjour', 'Hello'], foo_float=[0.1, 0.2, 0.3]), )

        args = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=['a', 'b', 'c'], foo_float=[0.1, 0.7]))
    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo', default=None, type=int)
        expected.add_argument('--bar', default=None, type=float, help='help message')
        expected.add_argument('--baz', default=None, type=str)
        expected.add_argument('--ces', nargs='+', default=[], type=str)
        expected.add_argument('--des', nargs='+', default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz='42', ces=['a', 'b', 'c'], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument('--required_list', nargs='+', type=int, required=True)
        expected.add_argument('--required_str', type=str, required=True)
        expected.add_argument(
            '--required_enum', type=make_choice_type_function(['titi', 'toto']), choices=['titi', 'toto'], required=True, )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument('--foo', type=int, required=True)
        expected.add_argument(
            '--required_enum', type=make_choice_type_function(['titi', 'toto']), choices=['titi', 'toto'], required=True, )
        expected.add_argument('--opt', type=string_to_bool, default=None)
        expected.add_argument('--baz', default='toto', type=str, help='help message')
        expected.add_argument('--foo_str', nargs='+', default=['Hallo', 'Bonjour', 'Hello'], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            'foo': 12,
            'bar': 3.14,
            'baz': '42',
            'flag': True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            'foo': 12,
            'bar': 3.14,
            'baz': '42',
            'flag': True,
            'extra': 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            'foo': 12,
            'bar': 3.14,
            'baz': '42',
            'flag': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, 'temp_json')
            os.mkdir(temp_local_path)
            with open(temp_local_path + '.json', 'w+') as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + '.json'))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            'foo': 12,
            'bar': 3.14,
            'baz': '42',
            'flag': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, 'temp_yaml')
            os.mkdir(temp_local_path)
            with open(temp_local_path + '.yaml', 'w+') as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + '.yaml'))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
| 715 |
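A minimal sketch of the HfArgumentParser workflow the tests above exercise; `TrainConfig` is a hypothetical dataclass, not one from the test file:

from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class TrainConfig:
    learning_rate: float = field(default=3e-4, metadata={"help": "Peak learning rate."})
    epochs: int = 3

parser = HfArgumentParser(TrainConfig)
(cfg,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-4", "--epochs", "5"])
print(cfg.learning_rate, cfg.epochs)  # 0.0001 5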
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, relative_attention=False,
                 position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaVaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason='Model not available yet')
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge')

        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f'{output[:, 1:4, 1:4]}')
| 431 | 0 |
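An inference sketch for the DeBERTa-v2 model exercised above, assuming `transformers` with PyTorch and access to the public checkpoint (any deberta-v2 checkpoint works the same way):

import torch
from transformers import AutoTokenizer, DebertaV2Model

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")
inputs = tokenizer("DeBERTa uses disentangled attention.", return_tensors="pt")
with torch.no_grad():
    hidden_states = model(**inputs).last_hidden_state
print(hidden_states.shape)  # (batch, seq_len, hidden_size)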
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename='mytext.txt', testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 13 |
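The test above leans on stacked `unittest.mock.patch` decorators; a self-contained sketch of the same technique on a made-up function (all names here are hypothetical):

from unittest.mock import patch

def read_greeting():
    with open("greeting.txt") as f:
        return f.read()

@patch("builtins.open")
def test_read_greeting(mock_open):
    # Configure the context manager returned by open() to yield a fake file.
    mock_open.return_value.__enter__.return_value.read.return_value = "hi"
    assert read_greeting() == "hi"
    mock_open.assert_called_once_with("greeting.txt")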
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 62 | 0 |
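A minimal sketch of the lazy-import pattern these `__init__` modules rely on; `MiniLazyModule` is a simplified, made-up stand-in for the real `_LazyModule`:

import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name to the submodule that defines it.
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        if attr not in self._class_to_module:
            raise AttributeError(attr)
        # Import the submodule only when one of its names is first requested.
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value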
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 659 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data', )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file', )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file', )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
            args.gold_data_path, 'w') as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')
if __name__ == "__main__":
main()
| 659 | 1 |
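For reference, a miniature of the `biencoder-nq-dev.json` shape the parser above consumes (the record content is invented):

import json

record = {
    "question": "who wrote hamlet",
    "positive_ctxs": [{"title": "Hamlet"}, {"title": "William Shakespeare"}],
}
with open("biencoder-mini.json", "w") as f:
    json.dump([record], f)
# With --src_path biencoder-mini.json the script writes "who wrote hamlet" to the
# evaluation set and "Hamlet\tWilliam Shakespeare" to the gold data file.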
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""

    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and unpack the report files contained in the daily CI artifacts."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"""{artifact_name}.zip""")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
| 391 |
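A hypothetical invocation of the helpers above; the artifact name and token are placeholders that depend entirely on the workflow configuration:

artifact_names = ["single-gpu_run_models_gpu_test_reports"]  # hypothetical artifact name
reports = get_last_daily_ci_reports(artifact_names, output_dir="ci_reports", token="<github-token>")
for artifact, files in reports.items():
    print(artifact, sorted(files))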
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 391 | 1 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str


class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True).items()}, {k: v.tolist() for k, v in expected_map_nested_sn_int.items()}, )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True, num_proc=num_proc).items()}, {k: v.tolist() for k, v in expected_map_nested_sn_int.items()}, )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = 'bar'

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
            "datasets.parallel.parallel.Pool") as mock_multiprocessing_pool:
        data_struct = {f'{i}': i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_pytorch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data" , [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input_ = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input_) == expected_output

    input_ = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input_) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])


def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
                pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 702 |
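A quick usage sketch for `map_nested`, the helper these tests exercise (assumes the `datasets` library is installed):

from datasets.utils.py_utils import map_nested

nested = {"train": [1, 2], "test": {"a": 3}}
print(map_nested(lambda x: x * 10, nested))
# {'train': [10, 20], 'test': {'a': 30}}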
'''simple docstring'''
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals):
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num):
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename="/p089_roman.txt"):
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings
if __name__ == "__main__":
print(F"{solution() = }")
| 394 | 0 |
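A round-trip check for the two conversion functions above (assuming they are in scope), illustrating the character savings the puzzle counts:

assert parse_roman_numerals("MCMXC") == 1990
assert generate_roman_numerals(1990) == "MCMXC"
# A wasteful form parses to the same value but regenerates minimally:
assert parse_roman_numerals("IIIIIIIII") == 9
assert generate_roman_numerals(9) == "IX"  # saves 7 characters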
'''simple docstring'''
import math
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the squares of 1..n."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 71 |
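A worked check of the formula above: for n = 10 the sum 1 + ... + 10 = 55, so its square is 3025, while the sum of squares is 385, giving 2640:

assert solution(10) == 2640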
from ..utils import is_flax_available, is_torch_available

if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 315 | 0 |
from math import isclose, sqrt
def next_point(point_x, point_y, incoming_gradient):
    # normal_gradient = gradient of the normal at the point of reflection
    normal_gradient = point_y / 4 / point_x
    # double-angle identities: s2 = sin(2t), c2 = cos(2t) for the normal angle
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaeneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord=1.4, first_y_coord=-9.6):
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(f"""{solution() = }""") | 484 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size):
        self._graph = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex):
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex, to_vertex, weight):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex, finish_vertex):
        queue = deque([start_vertex])
        distances = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-1 BFS: free edges go to the front of the deque, unit edges to the back.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 484 | 1 |
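A usage sketch for the 0-1 BFS graph above (class and method names follow the reconstruction):

g = AdjacencyList(5)
g.add_edge(0, 1, 0)
g.add_edge(0, 2, 1)
g.add_edge(1, 3, 1)
g.add_edge(2, 3, 0)
g.add_edge(3, 4, 0)
# Every route spends exactly one weight-1 edge, so the answer is 1.
print(g.get_shortest_path(0, 4))  # 1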