| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82 – 53.2k) | int64 (0 – 721) | string (lengths 91 – 41.9k) | int64 (0 – 699) | int64 (0 – 1) |
class Graph:
    """Undirected, weighted graph stored as an adjacency dict:
    adjacency[tail][head] = weight."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Add a vertex to the graph if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected, weighted edge (self-loops are ignored)."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Boruvka's algorithm assumes distinct edge weights; bump duplicate
        weights so that every edge weight becomes unique."""
        edges = self.get_edges()
        # Drop the mirrored copy of each undirected edge.
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Return all edges as (tail, head, weight) triples (both directions)."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Return all vertices of the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Build a graph from lists of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
class UnionFind:
    """Disjoint-set (union-find) with path compression and union by rank."""

    def __init__(self):
        self.parent = {}
        self.rank = {}

    def __len__(self):
        return len(self.parent)

    def make_set(self, item):
        if item in self.parent:
            return self.find(item)
        self.parent[item] = item
        self.rank[item] = 0
        return item

    def find(self, item):
        if item not in self.parent:
            return self.make_set(item)
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item])  # path compression
        return self.parent[item]

    def union(self, item1, item2):
        root1 = self.find(item1)
        root2 = self.find(item2)
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2
        # Equal ranks: pick root1 as the new root and bump its rank.
        self.rank[root1] += 1
        self.parent[root2] = root1
        return root1


def boruvka_mst(graph):
    """Compute a minimum spanning tree with Boruvka's algorithm.
    Assumes distinct edge weights (see Graph.distinct_weight)."""
    num_components = graph.num_vertices
    union_find = UnionFind()
    mst_edges = []
    while num_components > 1:
        cheap_edge = {}
        for vertex in graph.get_vertices():
            cheap_edge[vertex] = -1

        edges = graph.get_edges()
        # Drop the mirrored copy of each undirected edge.
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for edge in edges:
            head, tail, weight = edge
            set1 = union_find.find(head)
            set2 = union_find.find(tail)
            if set1 != set2:
                if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                    cheap_edge[set1] = [head, tail, weight]
                if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                    cheap_edge[set2] = [head, tail, weight]
        for vertex in cheap_edge:
            if cheap_edge[vertex] != -1:
                head, tail, weight = cheap_edge[vertex]
                if union_find.find(head) != union_find.find(tail):
                    union_find.union(head, tail)
                    mst_edges.append(cheap_edge[vertex])
                    num_components = num_components - 1
    mst = Graph.build(edges=mst_edges)
    return mst
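A minimal usage sketch of the two pieces above; the vertex labels and edge weights are made up for illustration:

```python
g = Graph.build(
    vertices=["a", "b", "c", "d"],
    edges=[("a", "b", 1), ("b", "c", 2), ("a", "c", 3), ("c", "d", 4)],
)
g.distinct_weight()   # Boruvka assumes unique weights
mst = boruvka_mst(g)
print(mst)            # prints the MST edges, one per line
```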
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
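A quick sketch of what the lazy module buys (illustrative only; the point is that the heavy torch-backed submodule is imported on first attribute access, not at package import time):

```python
# Cheap: importing the package does not import torch yet.
import transformers.models.timesformer as timesformer

# The first attribute access makes _LazyModule import the real submodule.
config = timesformer.TimesformerConfig(num_frames=16)
```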
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase ( _a , unittest.TestCase ):
a : Optional[Any] =MgpstrTokenizer
a : List[Any] =False
a : List[Any] ={}
a : Tuple =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
super().setUp()
# fmt: off
UpperCamelCase__ = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
UpperCamelCase__ = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case_ ) + '\n' )
def SCREAMING_SNAKE_CASE__ ( self , **snake_case_ ) -> List[str]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[int]:
UpperCamelCase__ = 'tester'
UpperCamelCase__ = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
pass
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.get_tokenizers(do_lower_case=snake_case_ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCamelCase__ = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
UpperCamelCase__ = tokenizer.encode([special_token] , add_special_tokens=snake_case_ )
self.assertEqual(len(snake_case_ ) , 1 )
UpperCamelCase__ = tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
self.assertTrue(special_token not in decoded )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCamelCase__ , UpperCamelCase__ = self.get_input_output_texts(snake_case_ )
UpperCamelCase__ = tokenizer.tokenize(snake_case_ )
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(snake_case_ )
UpperCamelCase__ = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(snake_case_ )
self.assertNotEqual(len(snake_case_ ) , 0 )
UpperCamelCase__ = tokenizer.decode(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(text_a.replace(' ' , '' ) , snake_case_ )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
pass
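A small usage sketch round-tripping the kind of character-level vocab written in setUp; the temp directory, toy vocab, and expected output are illustrative assumptions, not taken from the tests above:

```python
import json
import os
import tempfile

vocab = {"[GO]": 0, "[s]": 1, "t": 2, "e": 3, "s": 4, "r": 5}  # toy vocab
tmpdir = tempfile.mkdtemp()
vocab_path = os.path.join(tmpdir, "vocab.json")
with open(vocab_path, "w", encoding="utf-8") as fp:
    json.dump(vocab, fp)

tokenizer = MgpstrTokenizer(vocab_file=vocab_path)
print(tokenizer.tokenize("tester"))  # character-level: ['t', 'e', 's', 't', 'e', 'r']
```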
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCamelCase ( _a ):
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=False , snake_case_=True , snake_case_="None" , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> str:
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = relative_attention
UpperCamelCase__ = position_biased_input
UpperCamelCase__ = pos_att_type
UpperCamelCase__ = scope
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
UpperCamelCase__ = DebertaVaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase__ = model(snake_case_ , token_type_ids=snake_case_ )[0]
UpperCamelCase__ = model(snake_case_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DebertaVaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = DebertaVaForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
UpperCamelCase__ = DebertaVaForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
UpperCamelCase__ = DebertaVaForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.prepare_config_and_inputs()
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) = config_and_inputs
UpperCamelCase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( _a , _a , unittest.TestCase ):
a : Any =(
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
a : Dict =(
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
a : Tuple =True
a : Union[str, Any] =False
a : Tuple =False
a : Union[str, Any] =False
a : Dict =False
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = DebertaVaModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*snake_case_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = DebertaVaModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
UpperCamelCase__ = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
UpperCamelCase__ = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
UpperCamelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase__ = model(snake_case_ , attention_mask=snake_case_ )[0]
# compare the actual values for a slice.
UpperCamelCase__ = torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case_ , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
"""
Create a simple LSTM forecasting model with Keras: scale a univariate series,
window it into (look_back -> forward_days) samples, train, and predict.
"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
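The script stops at `model.predict`; a hedged sketch of evaluating the forecast in the original units follows. It assumes the fitted scaler is kept in a variable (i.e. the `MinMaxScaler().fit_transform(...)` line above would be split into `scaler = MinMaxScaler()` and `scaler.fit_transform(...)`):

```python
# Keep the fitted scaler around so scaled predictions can be inverted.
scaler = MinMaxScaler()
actual_data = scaler.fit_transform(df.iloc[:, 1:2].values.reshape(len_data, 1))

# ... after training and predicting as above ...
pred_original = scaler.inverse_transform(pred.reshape(-1, 1))
y_test_original = scaler.inverse_transform(y_test.reshape(-1, 1))
rmse = np.sqrt(np.mean((pred_original - y_test_original) ** 2))
print(f"test RMSE: {rmse:.4f}")
```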
import argparse

from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise the PyTorch model configuration
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
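A usage sketch calling the converter directly; the checkpoint and output paths are placeholders (note the code above expects the checkpoint path to end in `model.ckpt`, since it derives the vocab path via `tf_checkpoint_path[:-10] + "vocab.txt"`):

```python
convert_tf_checkpoint_to_pytorch(
    task="WTQ",
    reset_position_index_per_cell=True,
    tf_checkpoint_path="tapas_wtq/model.ckpt",   # placeholder path
    tapas_config_file="tapas_wtq/tapas_config.json",
    pytorch_dump_path="./tapas_wtq_pytorch",
)
```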
"""Tests for safetensors compatibility checks in diffusers pipelines."""

import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
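A rough restatement of the property these tests pin down, written as my own paraphrase rather than the library's actual implementation: every PyTorch `.bin` weight file must be shadowed by a `.safetensors` sibling, with an optional variant infix and a non-variant fallback.

```python
def naive_safetensors_compatible(filenames, variant=None):
    """Naive sketch of the rule under test; not the diffusers implementation."""
    suffix = f".{variant}" if variant is not None else ""
    for name in filenames:
        if not name.endswith(".bin"):
            continue
        stem = name[: -len(".bin")]
        candidates = {f"{stem}.safetensors"}
        # When a variant like "fp16" is requested, a variant .bin may also be
        # covered by the non-variant .safetensors file.
        if suffix and stem.endswith(suffix):
            candidates.add(f"{stem[: -len(suffix)]}.safetensors")
        if not candidates & set(filenames):
            return False
    return True
```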
"""TimeSformer model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
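A usage sketch building a randomly initialised model from a custom config:

```python
from transformers import TimesformerConfig, TimesformerModel

config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")
model = TimesformerModel(config)
print(model.config.num_frames)  # 16
```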
def split(string: str, separator: str = " ") -> list:
    """Split `string` on `separator`, dropping the trailing empty field."""
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
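Quick usage:

```python
print(split("apple#banana#cherry#orange", separator="#"))
# ['apple', 'banana', 'cherry', 'orange']
print(split("Hello there"))
# ['Hello', 'there']
```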
import unittest

import numpy as np


def schur_complement(
    mat_a,
    mat_b,
    mat_c,
    pseudo_inv=None,
):
    """
    Schur complement of the symmetric block matrix [[A, B], [B^T, C]]:
    returns C - B^T A^{-1} B. A precomputed (pseudo-)inverse of A may be
    supplied via `pseudo_inv`.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        # A has 2 rows while B has 3, which triggers the row-count check.
        a = np.array([[1, 2, 1], [2, 1, 2]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
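The determinant identity the first test exercises, stated for reference (a standard result for block matrices with invertible A):

```latex
M = \begin{pmatrix} A & B \\ B^{\mathsf T} & C \end{pmatrix},
\qquad
S = C - B^{\mathsf T} A^{-1} B,
\qquad
\det(M) = \det(A)\,\det(S).
```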
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class snake_case ( TensorFormatter[Mapping, 'torch.Tensor', Mapping] ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __lowercase : Tuple=None , **__lowercase : List[Any] ):
'''simple docstring'''
super().__init__(features=__lowercase )
__UpperCAmelCase : Any = torch_tensor_kwargs
import torch # noqa import torch at initialization
def A_ ( self : List[Any] , __lowercase : Tuple ):
'''simple docstring'''
import torch
if isinstance(__lowercase , __lowercase ) and column:
if all(
isinstance(__lowercase , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(__lowercase )
return column
def A_ ( self : Optional[int] , __lowercase : Optional[Any] ):
'''simple docstring'''
import torch
if isinstance(__lowercase , (str, bytes, type(__lowercase )) ):
return value
elif isinstance(__lowercase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
__UpperCAmelCase : Union[str, Any] = {}
if isinstance(__lowercase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
__UpperCAmelCase : Any = {'''dtype''': torch.intaa}
elif isinstance(__lowercase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
__UpperCAmelCase : int = {'''dtype''': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__lowercase , PIL.Image.Image ):
__UpperCAmelCase : List[Any] = np.asarray(__lowercase )
return torch.tensor(__lowercase , **{**default_dtype, **self.torch_tensor_kwargs} )
def A_ ( self : Optional[Any] , __lowercase : List[Any] ):
'''simple docstring'''
import torch
# support for torch, tf, jax etc.
if hasattr(__lowercase , '''__array__''' ) and not isinstance(__lowercase , torch.Tensor ):
__UpperCAmelCase : List[str] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__lowercase , np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(__lowercase ) for substruct in data_struct] )
elif isinstance(__lowercase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__lowercase ) for substruct in data_struct] )
return self._tensorize(__lowercase )
def A_ ( self : Optional[int] , __lowercase : dict ):
'''simple docstring'''
return map_nested(self._recursive_tensorize , __lowercase , map_list=__lowercase )
def A_ ( self : Dict , __lowercase : pa.Table ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.numpy_arrow_extractor().extract_row(__lowercase )
__UpperCAmelCase : List[Any] = self.python_features_decoder.decode_row(__lowercase )
return self.recursive_tensorize(__lowercase )
def A_ ( self : List[str] , __lowercase : pa.Table ):
'''simple docstring'''
__UpperCAmelCase : List[str] = self.numpy_arrow_extractor().extract_column(__lowercase )
__UpperCAmelCase : Optional[Any] = self.python_features_decoder.decode_column(__lowercase , pa_table.column_names[0] )
__UpperCAmelCase : int = self.recursive_tensorize(__lowercase )
__UpperCAmelCase : int = self._consolidate(__lowercase )
return column
def A_ ( self : Union[str, Any] , __lowercase : pa.Table ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.numpy_arrow_extractor().extract_batch(__lowercase )
__UpperCAmelCase : Union[str, Any] = self.python_features_decoder.decode_batch(__lowercase )
__UpperCAmelCase : List[Any] = self.recursive_tensorize(__lowercase )
for column_name in batch:
__UpperCAmelCase : List[Any] = self._consolidate(batch[column_name] )
return batch | 374 |
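End users normally reach this formatter through `set_format`/`with_format`. A small sketch of the observable behaviour (the toy data is illustrative):

```python
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]})
ds = ds.with_format("torch")
print(ds[0]["x"].dtype)  # torch.float32; integer columns come back as torch.int64
```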
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
lowercase__ :int = 'src/transformers'
lowercase__ :List[str] = 'docs/source/en/tasks'
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->str:
"""simple docstring"""
with open(UpperCAmelCase_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase : Union[str, Any] = f.readlines()
# Find the start prompt.
__UpperCAmelCase : Any = 0
while not lines[start_index].startswith(UpperCAmelCase_ ):
start_index += 1
start_index += 1
__UpperCAmelCase : Optional[Any] = start_index
while not lines[end_index].startswith(UpperCAmelCase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
lowercase__ :Any = direct_transformers_import(TRANSFORMERS_PATH)
lowercase__ :List[Any] = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
lowercase__ :Union[str, Any] = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def lowerCamelCase_ ( UpperCAmelCase_ ) ->Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : List[str] = TASK_GUIDE_TO_MODELS[task_guide]
__UpperCAmelCase : Dict = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(UpperCAmelCase_ , set() )
__UpperCAmelCase : List[Any] = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n"
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_=False ) ->Tuple:
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = _find_text_in_file(
filename=os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , )
__UpperCAmelCase : List[str] = get_model_list_for_task(UpperCAmelCase_ )
if current_list != new_list:
if overwrite:
with open(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
''' to fix this.''' )
if __name__ == "__main__":
lowercase__ :int = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowercase__ :Optional[Any] = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite) | 374 | 1 |
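A usage sketch mirroring what the CLI entry point above does, called from Python:

```python
# Check every task guide and rewrite stale model lists in place.
for task_guide in TASK_GUIDE_TO_MODELS:
    check_model_list_for_task(task_guide, overwrite=True)
```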
import logging
import random

import ray

from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex


logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    """Distributed retriever that delegates retrieval calls to Ray actors."""

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
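A minimal sketch of wiring this up, assuming a running Ray cluster; the actor count and checkpoint name are illustrative:

```python
import ray

ray.init()

# Wrap the RayRetriever class above as Ray actors; each hosts a retriever copy.
RemoteRetriever = ray.remote(RayRetriever)
workers = [RemoteRetriever.remote() for _ in range(4)]

retriever = RagRayDistributedRetriever.from_pretrained(
    "facebook/rag-token-nq",
    actor_handles=workers,
)
retriever.init_retrieval()
```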
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
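A typical Accelerator usage sketch, assuming a PyTorch `model`, `optimizer`, and `dataloader` already exist:

```python
from accelerate import Accelerator

accelerator = Accelerator()
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for batch in dataloader:
    optimizer.zero_grad()
    loss = model(**batch).loss
    accelerator.backward(loss)  # replaces loss.backward()
    optimizer.step()
```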
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break

                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)

                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
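This builder backs the packaged "json" loading script. Usage sketch (file names are placeholders):

```python
from datasets import load_dataset

# JSON Lines file, one object per line:
ds = load_dataset("json", data_files="data.jsonl")

# Single JSON document whose records live under one key:
ds = load_dataset("json", data_files="data.json", field="data")
```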
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
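# --- Hedged illustration (not part of the module above) -----------------------
# The same "import on first attribute access" effect can be sketched with a
# module-level __getattr__ (PEP 562); `_LazyModule` above is the real mechanism,
# and "AlbertModel" is just one attribute used for the demo.
#
# import importlib
#
# def __getattr__(name):
#     if name == "AlbertModel":
#         module = importlib.import_module(".modeling_albert", __name__)
#         return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")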
| 140 | 0 |
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
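    # Hedged aside on the dummy-object pattern: DummyObject (from ..utils) routes
    # class-level attribute access through requires_backends, so importing the
    # stub always succeeds and only *using* it raises an ImportError naming the
    # missing backends ("torch", "scipy" here). Illustration only:
    #
    # try:
    #     obj = LMSDiscreteScheduler()
    # except ImportError as err:
    #     print(err)  # explains that torch/scipy must be installed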
| 462 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 709 |
'''simple docstring'''
def is_palindrome(n) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    # Count Lychrel candidates below `limit`: numbers that do not reach a
    # palindrome within 50 reverse-and-add iterations.
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(f"""{solution() = }""")
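# Worked example (Project Euler 55): 47 + 74 = 121 is a palindrome after one
# iteration, so 47 is not Lychrel; 349 needs three iterations
# (349 -> 1292 -> 4213 -> 7337); 196 never yields a palindrome within the
# 50-iteration cap used above, so it is counted as a Lychrel candidate.
# assert not is_palindrome(349) and is_palindrome(7337)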
| 667 | 0 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
class TaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add the extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
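    # Hedged usage sketch (the checkpoint name is illustrative):
    # tok = TaTokenizerFast.from_pretrained("t5-small")
    # tok.build_inputs_with_special_tokens([10, 11], [20, 21])
    # -> [10, 11, eos, 20, 21, eos]   # prefix_tokens is empty, EOS closes each segment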
| 302 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_tf_common.py, as ViT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
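    # Hedged aside on the shapes checked above: a ViT sequence has one token per
    # patch plus the [CLS] token, e.g. image_size=224, patch_size=16 gives
    # (224 // 16) ** 2 + 1 = 197 positions, and the logits are (batch, 1000) for
    # the ImageNet-1k head of google/vit-base-patch16-224.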
| 302 | 1 |
'''simple docstring'''
from __future__ import annotations
def is_palindrome(n) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1000000) -> int:
    # Sum all numbers below `limit` that are palindromic in base 10 and base 2.
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
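# Worked example (Project Euler 36): 585 = 0b1001001001 is palindromic in both
# base 10 and base 2, so it contributes to the total; 10 (0b1010) does not.
# assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])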
| 717 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
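    # Hedged usage sketch of the scheduler under test (shapes and the `unet`
    # name are illustrative placeholders; a real pipeline supplies the model):
    #
    # scheduler = UniPCMultistepScheduler(num_train_timesteps=1000)
    # scheduler.set_timesteps(num_inference_steps=25)
    # sample = torch.randn(1, 3, 32, 32)
    # for t in scheduler.timesteps:
    #     model_output = unet(sample, t)              # hypothetical denoiser call
    #     sample = scheduler.step(model_output, t, sample).prev_sample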
| 305 | 0 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """
    A CLI menu to select a choice from a list of choices using the keyboard.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "
    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()
    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Should not be directly called, used to move a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)
    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return
    def run(self, default_choice: int = 0):
        "Start the menu and return the selected choice"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
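    # Hedged usage sketch (the prompt and choices are illustrative):
    # picked = BulletMenu("Pick a backend", ["cpu", "cuda", "mps"]).run(default_choice=0)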
| 402 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
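# Hedged usage sketch (the checkpoint name is illustrative and assumes Flax
# weights are available on the Hub):
#
# from transformers import FlaxAutoModel
# model = FlaxAutoModel.from_pretrained("bert-base-uncased")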
| 80 | 0 |
values = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: 'a',
11: 'b',
12: 'c',
13: 'd',
14: 'e',
15: 'f',
}
def decimal_to_hexadecimal(decimal: float) -> str:
    """Convert a whole decimal number to its hexadecimal string, e.g. 255 -> '0xff'."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
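# Worked examples: decimal_to_hexadecimal(255) == "0xff" and
# decimal_to_hexadecimal(-256) == "-0x100". Note one quirk of this
# implementation: 0 yields "0x", unlike the built-in hex(0) == "0x0",
# because the digit loop never runs for zero.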
| 571 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class TextaTextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = TextaTextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(__a )
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Tuple , *__a : Tuple , **__a : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
requires_backends(self , """vision""" )
self.check_model_type(SCREAMING_SNAKE_CASE__ )
    def __call__( self : Any , images : Tuple , **kwargs : Optional[int] ) -> str:
        """simple docstring"""
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self : Optional[int] , **kwargs : Optional[int] ) -> Optional[Any]:
        """simple docstring"""
        return {}, {}, {}
    def preprocess( self : List[str] , image : Dict ) -> Any:
        """simple docstring"""
        image = load_image(image )
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self : int , model_inputs : Optional[Any] ) -> Union[str, Any]:
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self : Tuple , model_outputs : Tuple ) -> str:
        """simple docstring"""
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output )).astype("""uint8""" )
        depth = Image.fromarray(formatted )
        output_dict = {}
        output_dict["""predicted_depth"""] = predicted_depth
        output_dict["""depth"""] = depth
        return output_dict
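# Hedged usage sketch for the depth-estimation pipeline defined above. The checkpoint
# name and image path are illustrative assumptions, not values taken from this file.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("example.jpg")
result["depth"].save("depth.png")        # PIL image, rescaled to 0-255
print(result["predicted_depth"].shape)   # raw depth tensor from the model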
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap( checkpoint_path : Any , enable_fusion : Union[str, Any]=False ) -> int:
    model , model_cfg = create_model(
        '''HTSAT-tiny''' , '''roberta''' , checkpoint_path , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=enable_fusion , fusion_type='''aff_2d''' if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict( state_dict : Optional[int] ) -> int:
    model_state_dict = {}
    sequential_layers_pattern = r'''.*sequential.(\d+).*'''
    text_projection_pattern = r'''.*_projection.(\d+).*'''
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(f'''sequential.{sequential_layer}.''' , f'''layers.{int(sequential_layer )//3}.linear.''' )
        elif re.match(text_projection_pattern , key ):
            projecton_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f'''_projection.{projecton_layer}.''' , f'''_projection.linear{transformers_projection_layer}.''' )
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv" , "query" )] = query_layer
            model_state_dict[key.replace("qkv" , "key" )] = key_layer
            model_state_dict[key.replace("qkv" , "value" )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint( checkpoint_path : Optional[int] , pytorch_dump_folder_path : Dict , config_path : Dict , enable_fusion : List[Any]=False ) -> Union[str, Any]:
    clap_model , model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
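# Self-contained sketch of the fused-qkv split performed in rename_state_dict above:
# one (3*dim, dim) projection is cut into equal query/key/value thirds along dim 0.
# The tensor size is an illustrative assumption.
import torch

dim = 8
mixed_qkv = torch.randn(3 * dim, dim)        # fused attention projection
qkv_dim = mixed_qkv.size(0) // 3
query_layer = mixed_qkv[:qkv_dim]
key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
value_layer = mixed_qkv[qkv_dim * 2 :]
assert query_layer.shape == key_layer.shape == value_layer.shape == (dim, dim)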
def solution( numerator : int = 3 , denominator : int = 7 , limit : int = 1_000_000 ) -> int:
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1 , limit + 1 ):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=100_0000))
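# Brute-force cross-check of solution() with exact fractions for a small limit;
# this verification helper is an addition, not part of the original solution.
from fractions import Fraction

def brute_force(numerator=3, denominator=7, limit=8):
    target = Fraction(numerator, denominator)
    best = Fraction(0, 1)
    for d in range(1, limit + 1):
        for n in range(1, d):
            if best < Fraction(n, d) < target:
                best = Fraction(n, d)
    return best.numerator

assert brute_force(limit=8) == solution(limit=8) == 2  # 2/5 sits just left of 3/7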
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = "\\n Text data.\n Second line of data."
FILE_PATH = "file"
@pytest.fixture(scope="session" )
def zstd_path( tmp_path_factory ):
    path = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8" )
    with zstd.open(path, "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file( tmpfs ):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH ), "w" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"] )
def test_cached_path_extract( compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file ):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True )
    extracted_path = cached_path(input_path, download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False] )
@pytest.mark.parametrize("default_cache_dir", [True, False] )
def test_extracted_datasets_path( default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch ):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir )
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename, download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local( text_file ):
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def test_cached_path_missing_local( tmp_path ):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt" )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec( tmpfs_file ):
    output_file = get_from_cache(F'''tmp://{tmpfs_file}''' )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True )
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", True )
def test_http_offline( tmp_path_factory ):
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        http_get("https://huggingface.co", temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", True )
def test_ftp_offline( tmp_path_factory ):
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get("ftp://huggingface.co", temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE", True )
def test_fsspec_offline( tmp_path_factory ):
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get("s3://huggingface.co", temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head("s3://huggingface.co" )
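# Stand-alone sketch of the zstd fixture used above: write a compressed payload with
# zstandard's file-like API and read it back. The file name is illustrative.
import zstandard as zstd

payload = b"Text data.\nSecond line of data."
with zstd.open("file.zstd", "wb") as f:
    f.write(payload)
with zstd.open("file.zstd", "rb") as f:
    assert f.read() == payload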
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)
class ImageGPTFeatureExtractor( ImageGPTImageProcessor ):
    def __init__( self : int , *args : Tuple , **kwargs : Optional[Any]):
        """simple docstring"""
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs)
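# Generic sketch of the deprecation-shim pattern above: the old class name keeps
# working but warns callers to migrate. Class names here are illustrative placeholders.
import warnings

class NewImageProcessor:
    pass

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)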
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 2_5),)
    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 1_0_0_0,
            "beta_start": 0.0_001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs )
        return config
def snake_case_ ( self , a_=0 , **a_ ):
a_ : Union[str, Any] = dict(self.forward_default_kwargs )
a_ : Union[str, Any] = kwargs.pop("num_inference_steps" , a_ )
a_ : List[str] = self.dummy_sample
a_ : Union[str, Any] = 0.1 * sample
a_ : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
a_ : Any = self.get_scheduler_config(**a_ )
a_ : Dict = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals
a_ : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
a_ : Dict = scheduler_class.from_pretrained(a_ )
new_scheduler.set_timesteps(a_ )
# copy over dummy past residuals
a_ : List[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
a_ , a_ : List[str] = sample, sample
for t in range(a_ , time_step + scheduler.config.solver_order + 1 ):
a_ : Dict = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
a_ : List[str] = new_scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self ):
pass
def snake_case_ ( self , a_=0 , **a_ ):
a_ : List[str] = dict(self.forward_default_kwargs )
a_ : Dict = kwargs.pop("num_inference_steps" , a_ )
a_ : List[str] = self.dummy_sample
a_ : str = 0.1 * sample
a_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
a_ : Union[str, Any] = self.get_scheduler_config()
a_ : Optional[Any] = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals (must be after setting timesteps)
a_ : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
a_ : List[Any] = scheduler_class.from_pretrained(a_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a_ )
# copy over dummy past residual (must be after setting timesteps)
a_ : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
a_ : Optional[int] = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
a_ : Tuple = new_scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self , a_=None , **a_ ):
if scheduler is None:
a_ : Optional[Any] = self.scheduler_classes[0]
a_ : Dict = self.get_scheduler_config(**a_ )
a_ : Union[str, Any] = scheduler_class(**a_ )
a_ : Optional[int] = self.scheduler_classes[0]
a_ : List[str] = self.get_scheduler_config(**a_ )
a_ : Tuple = scheduler_class(**a_ )
a_ : Optional[int] = 1_0
a_ : Optional[Any] = self.dummy_model()
a_ : Tuple = self.dummy_sample_deter
scheduler.set_timesteps(a_ )
for i, t in enumerate(scheduler.timesteps ):
a_ : str = model(a_ , a_ )
a_ : str = scheduler.step(a_ , a_ , a_ ).prev_sample
return sample
def snake_case_ ( self ):
a_ : Union[str, Any] = dict(self.forward_default_kwargs )
a_ : Tuple = kwargs.pop("num_inference_steps" , a_ )
for scheduler_class in self.scheduler_classes:
a_ : List[Any] = self.get_scheduler_config()
a_ : str = scheduler_class(**a_ )
a_ : Any = self.dummy_sample
a_ : int = 0.1 * sample
if num_inference_steps is not None and hasattr(a_ , "set_timesteps" ):
scheduler.set_timesteps(a_ )
elif num_inference_steps is not None and not hasattr(a_ , "set_timesteps" ):
a_ : Union[str, Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
a_ : str = dummy_past_residuals[: scheduler.config.solver_order]
a_ : str = scheduler.timesteps[5]
a_ : Dict = scheduler.timesteps[6]
a_ : Dict = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
a_ : Any = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case_ ( self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
a_ : Dict = DEISMultistepScheduler(**self.get_scheduler_config() )
a_ : List[str] = self.full_loop(scheduler=a_ )
a_ : Tuple = torch.mean(torch.abs(a_ ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
a_ : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
a_ : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
a_ : Tuple = UniPCMultistepScheduler.from_config(scheduler.config )
a_ : Any = DEISMultistepScheduler.from_config(scheduler.config )
a_ : str = self.full_loop(scheduler=a_ )
a_ : Any = torch.mean(torch.abs(a_ ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
def snake_case_ ( self ):
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=a_ )
def snake_case_ ( self ):
self.check_over_configs(thresholding=a_ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=a_ , prediction_type=a_ , sample_max_value=a_ , algorithm_type="deis" , solver_order=a_ , solver_type=a_ , )
def snake_case_ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a_ )
def snake_case_ ( self ):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=a_ , solver_type=a_ , prediction_type=a_ , algorithm_type=a_ , )
a_ : List[Any] = self.full_loop(
solver_order=a_ , solver_type=a_ , prediction_type=a_ , algorithm_type=a_ , )
assert not torch.isnan(a_ ).any(), "Samples have nan numbers"
def snake_case_ ( self ):
self.check_over_configs(lower_order_final=a_ )
self.check_over_configs(lower_order_final=a_ )
def snake_case_ ( self ):
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=a_ , time_step=0 )
def snake_case_ ( self ):
a_ : str = self.full_loop()
a_ : Dict = torch.mean(torch.abs(a_ ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
def snake_case_ ( self ):
a_ : Optional[Any] = self.full_loop(prediction_type="v_prediction" )
a_ : Union[str, Any] = torch.mean(torch.abs(a_ ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
def snake_case_ ( self ):
a_ : List[str] = self.scheduler_classes[0]
a_ : str = self.get_scheduler_config(thresholding=a_ , dynamic_thresholding_ratio=0 )
a_ : Dict = scheduler_class(**a_ )
a_ : int = 1_0
a_ : List[str] = self.dummy_model()
a_ : Optional[int] = self.dummy_sample_deter.half()
scheduler.set_timesteps(a_ )
for i, t in enumerate(scheduler.timesteps ):
a_ : Optional[int] = model(a_ , a_ )
a_ : Optional[Any] = scheduler.step(a_ , a_ , a_ ).prev_sample
        assert sample.dtype == torch.float16
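# Hedged sketch of driving the DEIS scheduler by hand, mirroring the full-loop logic
# tested above but with a stand-in noise prediction; shapes are illustrative.
import torch
from diffusers import DEISMultistepScheduler

scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = 0.1 * sample               # placeholder for model(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])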
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker :
    """simple docstring"""
    module : nn.Module
    traced : List[nn.Module] = field(default_factory=list )
    handles : list = field(default_factory=list )
    def _forward_hook( self , m , inputs , outputs )-> Any:
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
def __call__( self , _lowerCamelCase )-> Union[str, Any]:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(_lowerCamelCase )
[x.remove() for x in self.handles]
return self
    @property
    def parametrized( self )-> Optional[Any]:
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer :
    """simple docstring"""
    src : nn.Module
    dest : nn.Module
    verbose : int = 0
    src_skip : list = field(default_factory=list )
    dest_skip : list = field(default_factory=list )
    def __call__( self , x )-> List[str]:
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip , dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                f'''Numbers of operations are different. Source module has {len(src_traced )} operations while'''
                f''' destination module has {len(dest_traced )}.''' )
        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'''Transfered from={src_m} to={dest_m}''' )
def convert_weight_and_push( name : str , config : ResNetConfig , save_directory : Path , push_to_hub : bool = True ) ->Optional[int]:
    """simple docstring"""
    print(F'''Converting {name}...''' )
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 2_2_4, 2_2_4) )
        module_transfer(x )
    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = F'''resnet{'-'.join(name.split('resnet' ) )}'''
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=True , )
    print(F'''Pushed {checkpoint_name}''' )
def convert_weights_and_push( save_directory : Path , model_name : str = None , push_to_hub : bool = True ) ->Optional[Any]:
    """simple docstring"""
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 1_0_0_0
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ),
}
    if model_name:
        convert_weight_and_push(model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
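# Minimal sketch of the forward-hook tracing idea behind Tracker above: hook every
# submodule, run one forward pass, and collect the leaf modules that actually fired.
import torch
import torch.nn as nn

def trace_leaves(model: nn.Module, x: torch.Tensor):
    traced, handles = [], []
    def hook(m, inputs, outputs):
        if len(list(m.modules())) == 1:  # a leaf: no submodules of its own
            traced.append(m)
    for m in model.modules():
        handles.append(m.register_forward_hook(hook))
    model(x)
    for h in handles:
        h.remove()
    return traced

print(trace_leaves(nn.Sequential(nn.Conv2d(3, 4, 3), nn.ReLU()), torch.randn(1, 3, 8, 8)))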
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'xglm'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'attention_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self , vocab_size=2_5_6_0_0_8 , max_position_embeddings=2_0_4_8 , d_model=1_0_2_4 , ffn_dim=4_0_9_6 , num_layers=2_4 , attention_heads=1_6 , activation_function="gelu" , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , layerdrop=0.0 , init_std=0.0_2 , scale_embedding=True , use_cache=True , decoder_start_token_id=2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , )-> List[str]:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
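# Quick illustration of the attribute_map declared above: generic config attribute
# names resolve transparently to the XGLM-specific fields.
config = XGLMConfig()
assert config.hidden_size == config.d_model
assert config.num_attention_heads == config.attention_heads
assert config.num_hidden_layers == config.num_layers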
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
ACCEPTABLE_CHECKPOINTS = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def load_state_dict( checkpoint_path ):
    """simple docstring"""
    sd = torch.load(checkpoint_path , map_location="cpu" )
    return sd
def get_new_dict( d , config , rename_keys_prefix=rename_keys_prefix ):
    """simple docstring"""
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint( checkpoint_path , pytorch_dump_folder_path ):
    """simple docstring"""
    assert (
        checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"
    config = VisualBertConfig(**config_params )
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path )
    new_state_dict = get_new_dict(state_dict , config )
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config )
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config )
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config )
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config )
    model.load_state_dict(new_state_dict )
    # Save Checkpoints
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
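# Stand-alone sketch of the prefix-renaming loop in get_new_dict above: each key is
# passed through an ordered list of (old, new) substring replacements. Keys are toy values.
from collections import OrderedDict

pairs = [("bert.bert", "visual_bert"), ("bert.cls", "cls")]
old_sd = {"bert.bert.encoder.layer.0.weight": 1, "bert.cls.predictions.bias": 2}
new_sd = OrderedDict()
for key, value in old_sd.items():
    for a, b in pairs:
        key = key.replace(a, b)
    new_sd[key] = value
print(list(new_sd))  # ['visual_bert.encoder.layer.0.weight', 'cls.predictions.bias']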
# Function to print upper half of diamond (pyramid)
def floyd( n ):
    for i in range(0 , n ):
        for _ in range(0 , n - i - 1 ): # printing spaces
            print(''' ''' , end='''''' )
        for _ in range(0 , i + 1 ): # printing stars
            print('''* ''' , end='''''' )
        print()
def reverse_floyd( n ):
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ): # printing stars
            print('''* ''' , end='''''' )
        print()
        for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
            print(''' ''' , end='''''' )
def pretty_print( n ):
    if n <= 0:
        print(''' ... .... nothing printing :(''' )
        return
    floyd(n ) # upper half
    reverse_floyd(n ) # lower half
if __name__ == "__main__":
print(R'''| /\ | |- | |- |--| |\ /| |-''')
print(R'''|/ \| |- |_ |_ |__| | \/ | |_''')
    K = 1
    while K:
        user_number = int(input('''enter the number and , and see the magic : '''))
        print()
        pretty_print(user_number)
        K = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
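# A compact alternative sketch of the same diamond using string centering; equivalent
# in spirit to floyd/reverse_floyd above.
def diamond(n: int) -> str:
    rows = ["* " * (i + 1) for i in range(n)]
    rows += rows[::-1]
    return "\n".join(row.center(2 * n) for row in rows)

print(diamond(3))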
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor( SequenceFeatureExtractor ):
    model_input_names = ['''input_features''', '''is_longer''']
    def __init__( self : Union[str, Any] , feature_size : Dict=64 , sampling_rate : List[Any]=48000 , hop_length : List[str]=480 , max_length_s : List[Any]=10 , fft_window_size : Optional[Any]=1024 , padding_value : Optional[Any]=0.0 , return_attention_mask : Any=False , frequency_min : float = 0 , frequency_max : float = 14000 , top_db : int = None , truncation : str = "fusion" , padding : str = "repeatpad" , **kwargs : Union[str, Any] , ) -> Optional[int]:
        """simple docstring"""
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm=None , mel_scale='htk' , )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm='slaney' , mel_scale='slaney' , )
    def to_dict( self : Tuple ) -> Dict[str, Any]:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''feature_extractor_type'''] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features( self : int , waveform : np.array , mel_filters : Optional[np.array] = None ) -> np.ndarray:
        """simple docstring"""
        log_mel_spectrogram = spectrogram(
            waveform , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=mel_filters , log_mel='dB' , )
        return log_mel_spectrogram.T
    def _random_mel_fusion( self : str , mel : Optional[int] , total_frames : Optional[int] , chunk_frames : List[str] ) -> Optional[int]:
        """simple docstring"""
        ranges = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0] )
        idx_middle = np.random.choice(ranges[1] )
        idx_back = np.random.choice(ranges[2] )
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :] )
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink , size=[chunk_frames, 64] , mode='bilinear' , align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion
    def _get_input_mel( self : Optional[int] , waveform : np.array , max_length : Any , truncation : int , padding : List[Any] ) -> np.array:
        """simple docstring"""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform ) - max_length
                idx = np.random.randint(0 , overflow + 1 )
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                chunk_frames = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel] , axis=0 )
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel , total_frames , chunk_frames )
                    longer = True
            else:
                raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat ) )
                waveform = np.pad(waveform , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
        return input_mel, longer
    def __call__( self : int , raw_speech : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , truncation : str = None , padding : Optional[str] = None , max_length : Optional[int] = None , sampling_rate : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs : Tuple , ) -> BatchFeature:
        """simple docstring"""
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech )]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform , max_length if max_length else self.nb_max_samples , truncation , padding )
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel )
            is_longer.append(longer )
        if truncation == "fusion" and sum(is_longer ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0 , len(input_mel ) )
            is_longer[rand_idx] = True
        if isinstance(input_mel[0] , list ):
            input_mel = [np.asarray(feature , dtype=np.float32 ) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {'input_features': input_mel, 'is_longer': is_longer}
        input_features = BatchFeature(input_features )
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors )
        return input_features
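# Hedged usage sketch for the feature extractor above on synthetic audio; the checkpoint
# name is one this extractor ships with, and the waveform is random noise.
import numpy as np
from transformers import ClapFeatureExtractor

extractor = ClapFeatureExtractor.from_pretrained("laion/clap-htsat-unfused")
audio = np.random.randn(48_000 * 2).astype(np.float32)  # 2 s of fake audio at 48 kHz
features = extractor(audio, sampling_rate=48_000, return_tensors="pt")
print(features["input_features"].shape, features["is_longer"])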
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHAaHash :
    def __init__( self : Optional[int] , data : str ) -> Dict:
        """simple docstring"""
        self.data = data
        self.h = [0X6745_2301, 0XEFCD_AB89, 0X98BA_DCFE, 0X1032_5476, 0XC3D2_E1F0]
    @staticmethod
    def rotate( n : Union[str, Any] , b : Optional[int] ) -> Optional[int]:
        """simple docstring"""
        return ((n << b) | (n >> (32 - b))) & 0XFFFF_FFFF
    def padding( self : str ) -> Dict:
        """simple docstring"""
        padding = B'\x80' + B'\x00' * (63 - (len(self.data ) + 8) % 64)
        padded_data = self.data + padding + struct.pack('>Q' , 8 * len(self.data ) )
        return padded_data
    def split_blocks( self : Optional[Any] ) -> List[Any]:
        """simple docstring"""
        return [
            self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
        ]
    def expand_block( self : Union[str, Any] , block : Optional[int] ) -> str:
        """simple docstring"""
        w = list(struct.unpack('>16L' , block ) ) + [0] * 64
        for i in range(16 , 80 ):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
        return w
    def final_hash( self : Any ) -> Optional[int]:
        """simple docstring"""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block )
            a , b , c , d , e = self.h
            for i in range(0 , 80 ):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0X5A82_7999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0X6ED9_EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0X8F1B_BCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0XCA62_C1D6
                a , b , c , d , e = (
                    self.rotate(a , 5 ) + f + e + k + expanded_block[i] & 0XFFFF_FFFF,
                    a,
                    self.rotate(b , 30 ),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0XFFFF_FFFF,
                self.h[1] + b & 0XFFFF_FFFF,
                self.h[2] + c & 0XFFFF_FFFF,
                self.h[3] + d & 0XFFFF_FFFF,
                self.h[4] + e & 0XFFFF_FFFF,
            )
        return ("{:08x}" * 5).format(*self.h )
def test_sha1_hash( ) -> Tuple:
    '''simple docstring'''
    msg = B'Test String'
    assert SHAaHash(msg ).final_hash() == hashlib.sha1(msg ).hexdigest() # noqa: S324
def main( ) -> str:
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='Process some strings or files' )
    parser.add_argument(
        '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
    parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' )
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , 'utf-8' )
    print(SHAaHash(hash_input ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
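# Quick self-check of the class above against a well-known SHA-1 test vector.
assert SHAaHash(b"abc" ).final_hash() == hashlib.sha1(b"abc" ).hexdigest() == "a9993e364706816aba3e25717850c26c9cd0d89d"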
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
a = ["bert-base-uncased", "bert-base-cased"]
a = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave( tf.keras.Model ):
        def __init__( self : str , tokenizer : Tuple ):
            '''simple docstring'''
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.bert = TFAutoModel.from_config(config )
        def call( self : Tuple , inputs : Dict ):
            '''simple docstring'''
            tokenized = self.tokenizer(inputs )
            out = self.bert(**tokenized )
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __a ( unittest.TestCase ):
    def setUp( self : Any ):
        '''simple docstring'''
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ] # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint ,use_fast_bert_tokenizer=False )
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences = [
            """This is a straightforward English test sentence.""",
            """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
            """Now we're going to add some Chinese: 一 二 三 一二三""",
            """And some much more rare Chinese: 齉 堃 齉堃""",
            """Je vais aussi écrire en français pour tester les accents""",
            """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
        ]
        self.paired_sentences = list(zip(self.test_sentences ,self.test_sentences[::-1] ) )
    def test_output_equivalence( self : List[Any] ):
        '''simple docstring'''
        for tokenizer, tf_tokenizer in zip(self.tokenizers ,self.tf_tokenizers ):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs ,return_tensors="""tf""" ,padding="""longest""" )
                tf_outputs = tf_tokenizer(test_inputs )
                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] ,tf.int64 ) == tf_outputs[key] ) )
    @slow
    def test_different_pairing_styles( self : int ):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences )
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences] ,text_pair=[sentence[1] for sentence in self.paired_sentences] ,)
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] ,tf.int64 ) == separated_outputs[key] ) )
    @slow
    def test_graph_mode( self : int ):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
    @slow
    def test_saved_model( self : int ):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor(self.test_sentences )
            out = model(test_inputs ) # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / """saved.model"""
                model.save(save_path )
                loaded_model = tf.keras.models.load_model(save_path )
                loaded_output = loaded_model(test_inputs )
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) ,1E-5 )
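# Hedged sketch of the in-graph tokenization under test above: TFBertTokenizer runs
# inside tf.function / SavedModel graphs, unlike the Python-side BertTokenizer.
# It requires the tensorflow_text backend.
import tensorflow as tf
from transformers import TFBertTokenizer

tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")

@tf.function
def tokenize(texts):
    return tf_tokenizer(texts)

print(tokenize(tf.constant(["This is a straightforward English test sentence."])))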
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '\nHuman: <<task>>\n\nAssistant: '
DEFAULT_PROMPTS_REPO = 'huggingface-tools/default-prompts'
PROMPT_FILES = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def download_prompt( prompt_or_repo_id , agent_name , mode="run"):
    '''simple docstring'''
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s" , prompt_or_repo_id) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name})
    with open(prompt_file , "r" , encoding="utf-8") as f:
        return f.read()
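# Usage sketch for download_prompt above: a literal template (it contains whitespace,
# so it is returned unchanged) versus a Hub repo id (fetched through cached_file).
template = download_prompt("Answer the question: <<task>>", agent_name="demo-agent")
assert template.startswith("Answer")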
from typing import Any
def viterbi( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ):
    _validation(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    # Creates data structures and fill initial step
    probabilities : dict = {}
    pointers : dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ):
    _validate_not_empty(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def _validate_not_empty( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ):
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('There\'s an empty parameter' )
def _validate_lists( observations_space , states_space ):
    _validate_list(observations_space , 'observations_space' )
    _validate_list(states_space , 'states_space' )
def _validate_list( _object , var_name ):
    if not isinstance(_object , list ):
        msg = f'{var_name} must be a list'
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x , str ):
                msg = f'{var_name} must be a list of strings'
                raise ValueError(msg )
def _validate_dicts( initial_probabilities , transition_probabilities , emission_probabilities , ):
    _validate_dict(initial_probabilities , 'initial_probabilities' , float )
    _validate_nested_dict(transition_probabilities , 'transition_probabilities' )
    _validate_nested_dict(emission_probabilities , 'emission_probabilities' )
def _validate_nested_dict( _object , var_name ):
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def _validate_dict( _object , var_name , value_type , nested = False ):
    if not isinstance(_object , dict ):
        msg = f'{var_name} must be a dict'
        raise ValueError(msg )
    if not all(isinstance(x , str ) for x in _object ):
        msg = f'{var_name} all keys must be strings'
        raise ValueError(msg )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text = 'nested dictionary ' if nested else ''
        msg = f'{var_name} {nested_text}all values must be {value_type.__name__}'
        raise ValueError(msg )
if __name__ == "__main__":
from doctest import testmod
testmod()
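# Worked example for the Viterbi routine above: the classic healthy/sick HMM with
# textbook probabilities; the most likely state path for these observations is
# ['healthy', 'healthy', 'sick'].
observations = ['normal', 'cold', 'dizzy']
states = ['healthy', 'sick']
start_p = {'healthy': 0.6, 'sick': 0.4}
trans_p = {
    'healthy': {'healthy': 0.7, 'sick': 0.3},
    'sick': {'healthy': 0.4, 'sick': 0.6},
}
emit_p = {
    'healthy': {'normal': 0.5, 'cold': 0.4, 'dizzy': 0.1},
    'sick': {'normal': 0.1, 'cold': 0.3, 'dizzy': 0.6},
}
print(viterbi(observations, states, start_p, trans_p, emit_p))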
def manhattan_distance( point_a , point_b ):
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError('Both points must be in the same n-dimensional space' )
    return float(sum(abs(a - b ) for a, b in zip(point_a , point_b ) ) )
def _validate_point( point ):
    if point:
        if isinstance(point , list ):
            for item in point:
                if not isinstance(item , (int, float) ):
                    msg = (
                        'Expected a list of numbers as input, found '
                        f'{type(item ).__name__}'
                    )
                    raise TypeError(msg )
        else:
            msg = f'Expected a list of numbers as input, found {type(point ).__name__}'
            raise TypeError(msg )
    else:
        raise ValueError('Missing an input' )
def manhattan_distance_one_liner( point_a , point_b ):
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError('Both points must be in the same n-dimensional space' )
    return float(sum(abs(x - y ) for x, y in zip(point_a , point_b ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
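# Quick checks of the distance helpers above.
assert manhattan_distance([1, 1], [2, 2]) == 2.0
assert manhattan_distance_one_liner([1.5, 2], [3, 2]) == 1.5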
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """An XLNet sequence has the format ``X <sep> <cls>`` (pair: ``A <sep> B <sep> <cls>``)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Token type ids: 0 for the first segment, 1 for the second, 2 for the final ``<cls>``."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
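# Usage sketch (downloads pretrained files; model id taken from the map above):
#
#     tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#     ids = tokenizer("Hello world").input_ids
#     # Note XLNet appends <sep> and <cls> at the *end*, and pads on the left.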
| 12 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """BigBird QA module with an extra 5-way classification head over the pooled output."""

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    """Mean cross-entropy averaged over the start, end and category heads."""

    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
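# Shape sketch (illustrative sizes, not from the original script): with a
# batch of 2 and sequence length 4096, start/end logits are (2, 4096) and the
# pooled category logits are (2, 5); each head's labels are one-hot encoded
# against jnp.arange(num_classes) before the log-softmax cross-entropy above.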
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3_000
    save_steps: int = 10_500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20_000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4_096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
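# Schedule sketch: with the Args defaults (init_lr=0.0, lr=3e-5,
# warmup_steps=20_000), the learning rate ramps linearly from 0 to 3e-5 over
# the first 20k steps, then decays linearly towards 1e-7 for the remaining
# num_train_steps - warmup_steps steps, e.g.
#
#     tx, lr_schedule = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=20_000,
#                                num_train_steps=100_000, weight_decay=0.0095)
#     lr_schedule(0)        # -> 0.0
#     lr_schedule(20_000)   # -> ~3e-5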
| 12 | 1 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 65_536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 65_536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 131_072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
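# Quick sanity check of the crash schedule (values illustrative):
#
#     t = torch.linspace(1, 0, 5)
#     get_crash_schedule(t)  # monotone remapping of t; endpoints stay at 1 and 0
#
# This is the same remapping applied to the timestep grid in main() below.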
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
UP_NUM_TO_LAYER = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
MID_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
DEPTH_0_TO_LAYER = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
RES_CONV_MAP = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
ATTN_MAP = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
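# Example of the mapping performed by rename() (derived from the branches
# above): "timestep_embed.weight" -> "time_proj.weight", and a key that
# reaches the attention branch can fan out into a *list* of new keys (one per
# query/key/value), which rename_orig_weights() below handles separately.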
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue

        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
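# Illustration of the qkv split above (shapes only, hypothetical key names):
# a fused qkv conv weight of shape (3*C, C, 1) is cut into three (C, C)
# linear weights, dropping the trailing kernel dimension, e.g.
#
#     v = torch.randn(3 * 8, 8, 1)
#     transform_conv_attns({}, ["q.weight", "k.weight", "v.weight"], v)
#     # -> dict with three (8, 8) tensors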
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    diffusion_ema = orig_model.diffusion_ema.eval()
    orig_model_state_dict = diffusion_ema.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(diffusion_ema, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
lowercase : List[Any] = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
lowercase : Tuple = parser.parse_args()
main(args)
| 159 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
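# Renaming sketch: a checkpoint key such as (hypothetical)
#     "image_encoder.module.blocks.0.attn.qkv.weight"
# becomes
#     "flava.image_model.blocks.0.attn.qkv.weight"
# after the chain of replaces above; keys from the DALL-E codebook checkpoint
# are namespaced under "image_codebook." instead.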
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase : Any = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
lowercase : List[Any] = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 159 | 1 |
from manim import *
class Stage1(Scene):
    # NOTE: class, variable names and direction constants reconstructed from
    # the Manim calls; the original identifiers were lost in the source dump.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1), Create(cpu_right_col, run_time=1), Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3

            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 250 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs):
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)
    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)
def _UpperCamelCase ( self ):
lowerCamelCase_ : int = self.canine_tokenizer
lowerCamelCase_ : Tuple = [
"What's the weater?",
"It's about 25 degrees.",
]
lowerCamelCase_ : Optional[Any] = tokenizer(
text_target=a_ , max_length=32 , padding="max_length" , truncation=a_ , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def _UpperCamelCase ( self ):
# safety check on max_len default value so we are sure the test works
lowerCamelCase_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCamelCase_ : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ : Optional[int] = tempfile.mkdtemp()
lowerCamelCase_ : Dict = " He is very happy, UNwant\u00E9d,running"
lowerCamelCase_ : Optional[int] = tokenizer.encode(a_ , add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
lowerCamelCase_ : Union[str, Any] = tokenizer.__class__.from_pretrained(a_ )
lowerCamelCase_ : List[Any] = after_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
shutil.rmtree(a_ )
lowerCamelCase_ : List[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ : List[Any] = tempfile.mkdtemp()
lowerCamelCase_ : Tuple = " He is very happy, UNwant\u00E9d,running"
lowerCamelCase_ : Dict = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
lowerCamelCase_ : List[str] = chr(0Xe007 )
additional_special_tokens.append(a_ )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowerCamelCase_ : List[str] = tokenizer.encode(a_ , add_special_tokens=a_ )
tokenizer.save_pretrained(a_ )
lowerCamelCase_ : Any = tokenizer.__class__.from_pretrained(a_ )
lowerCamelCase_ : Any = after_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
self.assertIn(a_ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCamelCase_ : int = tokenizer.__class__.from_pretrained(a_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(a_ )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ ,lowerCamelCase_ : str = self.get_clean_sequence(a_ )
# a special token for Canine can be defined as follows:
lowerCamelCase_ : Tuple = 0Xe005
lowerCamelCase_ : Dict = chr(a_ )
tokenizer.add_special_tokens({"cls_token": special_token} )
lowerCamelCase_ : List[str] = tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertEqual(len(a_ ) , 1 )
lowerCamelCase_ : List[Any] = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=a_ )
lowerCamelCase_ : List[Any] = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Dict = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Any = tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertEqual(a_ , input_encoded + special_token_id )
lowerCamelCase_ : Optional[int] = tokenizer.decode(a_ , skip_special_tokens=a_ )
self.assertTrue(special_token not in decoded )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ : Optional[int] = chr(0Xe005 )
lowerCamelCase_ : str = chr(0Xe006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=a_ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
lowerCamelCase_ : Tuple = tokenizer.tokenize(a_ )
lowerCamelCase_ : List[Any] = tokenizer.tokenize(a_ )
self.assertEqual(len(a_ ) , 1 )
self.assertEqual(len(a_ ) , 1 )
self.assertEqual(token_a[0] , a_ )
self.assertEqual(token_a[0] , a_ )
@require_tokenizers
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# a special token for Canine can be defined as follows:
lowerCamelCase_ : List[str] = 0Xe006
lowerCamelCase_ : Any = chr(a_ )
lowerCamelCase_ : str = AddedToken(a_ , lstrip=a_ )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(a_ )
tokenizer.from_pretrained(a_ )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a_ )
with open(os.path.join(a_ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
lowerCamelCase_ : List[Any] = json.load(a_ )
with open(os.path.join(a_ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
lowerCamelCase_ : int = json.load(a_ )
# a special token for Canine can be defined as follows:
lowerCamelCase_ : Any = 0Xe006
lowerCamelCase_ : List[Any] = chr(a_ )
lowerCamelCase_ : Any = [new_token_a]
lowerCamelCase_ : Optional[Any] = [new_token_a]
with open(os.path.join(a_ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(a_ , a_ )
with open(os.path.join(a_ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(a_ , a_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase_ : str = tokenizer_class.from_pretrained(a_ , extra_ids=0 )
self.assertIn(a_ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
lowerCamelCase_ : Optional[int] = 0Xe007
lowerCamelCase_ : List[str] = chr(a_ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase_ : int = [AddedToken(a_ , lstrip=a_ )]
lowerCamelCase_ : Dict = tokenizer_class.from_pretrained(
a_ , additional_special_tokens=a_ , extra_ids=0 )
self.assertIn(a_ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ : Union[str, Any] = "hello world"
if self.space_between_special_tokens:
lowerCamelCase_ : int = "[CLS] hello world [SEP]"
else:
lowerCamelCase_ : int = input
lowerCamelCase_ : Optional[Any] = tokenizer.encode(a_ , add_special_tokens=a_ )
lowerCamelCase_ : Any = tokenizer.decode(a_ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(a_ , [output, output.lower()] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ : Tuple = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowerCamelCase_ : Optional[int] = "a"
lowerCamelCase_ : Dict = ord(a_ )
for attr in attributes_list:
setattr(a_ , attr + "_id" , a_ )
self.assertEqual(getattr(a_ , a_ ) , a_ )
self.assertEqual(getattr(a_ , attr + "_id" ) , a_ )
setattr(a_ , attr + "_id" , a_ )
self.assertEqual(getattr(a_ , a_ ) , a_ )
self.assertEqual(getattr(a_ , attr + "_id" ) , a_ )
setattr(a_ , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(a_ , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(a_ , "additional_special_tokens_ids" ) , [] )
lowerCamelCase_ : Optional[int] = 0Xe006
lowerCamelCase_ : List[str] = chr(a_ )
setattr(a_ , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(a_ , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(a_ , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
pass
| 250 | 1 |
"""simple docstring"""
def nand_gate(input_1: int, input_2: int) -> int:
    """Logical NAND: returns 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
| 255 |
"""simple docstring"""
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """
    Returns the smallest row length n for which the number of ways to place
    red blocks of length >= min_block_length (separated by at least one black
    square) first exceeds one million.
    """
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 255 | 1 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]

        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
| 40 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 649 | 0 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 164 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """
    Returns the prime factors of n in ascending order.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    >>> prime_factors(1)
    []
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 164 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
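# Usage sketch (model name hypothetical, but the "o365" branch is taken from
# the function above):
#
#     config = get_deta_config("deta-swin-large-o365")
#     config.num_labels  # -> 366 for Object365-pretrained checkpoints, else 91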
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.reduction.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.bias''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', F'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', F'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', F'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', F'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', F'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', F'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.weight''', F'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.weight''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.weight''', F'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.bias''', F'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
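# Minimal illustration of rename_key on a toy dict (hypothetical tensor value):
#     sd = {"backbone.0.body.patch_embed.norm.weight": 0}
#     rename_key(sd, "backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight")
#     sd  # {"model.backbone.model.embeddings.norm.weight": 0}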
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
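# Shape sketch: for a stage width `dim`, the fused qkv projection has weight (3*dim, dim) and bias (3*dim,).
# Slicing rows [:dim], [dim:2*dim] and [-dim:] recovers the separate query, key and value projections,
# in that order, which is what the two read_in_*_q_k_v helpers rely on.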
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # print name and shape of every parameter of the original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format="coco_detection")
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))
    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
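# Run sketch (script filename assumed for illustration):
#     python convert_deta_swin_to_hf.py --model_name deta-swin-large \
#         --pytorch_dump_folder_path ./deta-swin-large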
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the ๐ค hub.'''
)
    args = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 39 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]
    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T
    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
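    # Normalization sketch: with `db_range=80.0` the log-mel values span an 80 dB window, roughly
    # [-60, 20] dB here; (v - 20) / 40 maps that window onto [-2, 0], and clip + 1 lands it in [-1, 1].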
    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
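# Usage sketch (assumed call pattern, mirroring other HF feature extractors):
#     import numpy as np
#     extractor = TvltFeatureExtractor()
#     audio = np.zeros(44100, dtype=np.float32)          # 1 s of mono audio at 44.1 kHz
#     batch = extractor(audio, sampling_rate=44100, return_tensors="np")
#     batch["audio_values"].shape                        # (1, 1, time, 128)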
| 414 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """
    Choose a random element of the list as the pivot.
    """
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """
    Return the k-th smallest element (1-indexed) of lst using randomized quickselect.
    """
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
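# Example: the 3rd smallest element of a duplicate-free list
# (elements equal to the chosen pivot are dropped, so duplicates may be collapsed):
#     kth_number([3, 1, 4, 5, 9], 3)  # -> 4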
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name
    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
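# Illustration on a hypothetical key: rename_key("patch_embed.0.weight", 1)
# returns "efficientformer.patch_embed.convolution1.weight".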
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add model", use_temp_dir=True
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add image processor", use_temp_dir=True
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
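# Run sketch (file names assumed for illustration):
#     python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
#         --pytorch_model_path efficientformer_l1.pth --config_file l1_config.json \
#         --pytorch_dump_path efficientformer-l1 --no-push_to_hub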
| 422 | 0 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3
    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3
    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
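# Run sketch (test path assumed): python -m pytest tests/schedulers/test_scheduler_unclip.py -q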
| 37 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
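    # WordPiece is greedy longest-match-first: "unwanted" splits as "un" + "##want" + "##ed" because "un"
    # is the longest vocab prefix, then matching continues on the remainder with the "##" continuation marker.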
@require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control(self):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
| 279 | 0 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
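# Note on the dummy-object pattern: this placeholder keeps imports of the class (name as reconstructed
# above) working even when the optional backends are missing; `requires_backends` then raises an
# ImportError with install instructions the first time the class is instantiated or loaded.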
| 721 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
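# Usage sketch:
#     config = ViTMSNConfig()                 # base-style defaults as above
#     config = ViTMSNConfig(image_size=384)   # any field can be overridden via kwargs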
| 392 | 0 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
@property
    def gpu_provider(self):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", safety_checker=None,
            feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5,
            num_inference_steps=10, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", scheduler=lms_scheduler,
            safety_checker=None, feature_extractor=None, provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5,
            num_inference_steps=20, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 47 |
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
A__ : Optional[int] = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
else:
# weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)
    if len(unexpected_keys) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
    if len(missing_keys) > 0:
logger.warning(
F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
" use it for predictions and inference." )
return pt_model
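# Layout note: a Flax `Dense` kernel is stored as (in_features, out_features) while a PyTorch `Linear`
# weight is (out_features, in_features), hence the `.T`; Flax conv kernels are HWIO and PyTorch's are
# OIHW, hence the (3, 2, 0, 1) transpose for 4-D kernels above.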
| 153 | 0 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int=13 , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : List[str]=99 , UpperCAmelCase_ : Dict=32 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : List[str]=37 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Union[str, Any]=20 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : Tuple=4 , ):
SCREAMING_SNAKE_CASE : List[str] = parent
SCREAMING_SNAKE_CASE : Dict = batch_size
SCREAMING_SNAKE_CASE : List[str] = seq_length
SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
SCREAMING_SNAKE_CASE : Dict = use_labels
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Tuple = hidden_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : str = eos_token_id
SCREAMING_SNAKE_CASE : List[Any] = pad_token_id
SCREAMING_SNAKE_CASE : List[str] = bos_token_id
SCREAMING_SNAKE_CASE : Dict = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
SCREAMING_SNAKE_CASE : Any = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
SCREAMING_SNAKE_CASE : str = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None , lowercase=None , ):
"""simple docstring"""
if attention_mask is None:
SCREAMING_SNAKE_CASE : Dict = tf.cast(tf.math.not_equal(lowercase , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE : Dict = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE : Any = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
UpperCamelCase_ : Optional[Any] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase_ : str = (
{
'''conversational''': TFLEDForConditionalGeneration,
'''feature-extraction''': TFLEDModel,
'''summarization''': TFLEDForConditionalGeneration,
'''text2text-generation''': TFLEDForConditionalGeneration,
'''translation''': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase_ : int = True
UpperCamelCase_ : str = False
UpperCamelCase_ : Tuple = False
UpperCamelCase_ : str = False
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : List[str] = TFLEDModelTester(self )
SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=UpperCAmelCase_ )
def _A ( self : List[Any] ):
self.config_tester.run_common_tests()
def _A ( self : Any ):
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase_ )
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.zeros_like(inputs_dict["attention_mask"] )
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : Optional[int] = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.seq_length
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE : Dict = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE : Optional[int] = [t.numpy() for t in outputs.encoder_attentions]
SCREAMING_SNAKE_CASE : Optional[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(UpperCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(UpperCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : Any = model_class(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE : List[str] = len(UpperCAmelCase_ )
self.assertEqual(config.output_hidden_states , UpperCAmelCase_ )
check_encoder_attentions_output(UpperCAmelCase_ )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE : Optional[int] = model_class(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase_ )
check_decoder_attentions_output(UpperCAmelCase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase_ )
check_encoder_attentions_output(UpperCAmelCase_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = model(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase_ ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase_ )
check_encoder_attentions_output(UpperCAmelCase_ )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def _A ( self : int ):
pass
def _A ( self : str ):
# TODO: Head-masking not yet implemented
pass
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
return tf.constant(lowercase , dtype=tf.int32 )
snake_case = 1e-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : str = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
SCREAMING_SNAKE_CASE : Optional[Any] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
SCREAMING_SNAKE_CASE : int = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
SCREAMING_SNAKE_CASE : List[Any] = prepare_led_inputs_dict(model.config , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = model(**UpperCAmelCase_ )[0]
SCREAMING_SNAKE_CASE : int = (1, 1024, 768)
self.assertEqual(output.shape , UpperCAmelCase_ )
# change to expected output here
SCREAMING_SNAKE_CASE : List[str] = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-3 )
def _A ( self : List[str] ):
SCREAMING_SNAKE_CASE : str = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
SCREAMING_SNAKE_CASE : Optional[Any] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
SCREAMING_SNAKE_CASE : str = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
SCREAMING_SNAKE_CASE : Tuple = prepare_led_inputs_dict(model.config , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = model(**UpperCAmelCase_ )[0]
SCREAMING_SNAKE_CASE : Dict = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , UpperCAmelCase_ )
# change to expected output here
SCREAMING_SNAKE_CASE : Optional[int] = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-3 , rtol=1E-3 )
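# ---------------------------------------------------------------------------
# A minimal, self-contained sketch (not part of the test file above) of the
# sequence-length rule used by the LED tester: the effective encoder length is
# the smallest multiple of `attention_window` that is >= `seq_length`. The
# helper name is illustrative, not a transformers API.
def pad_to_attention_window(seq_length: int, attention_window: int) -> int:
    """Round seq_length up to the nearest multiple of attention_window."""
    return seq_length + (attention_window - seq_length % attention_window) % attention_window

assert pad_to_attention_window(13, 4) == 16
assert pad_to_attention_window(16, 4) == 16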
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : str = '''segformer'''
def __init__( self : List[Any] , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : List[str]=[2, 2, 2, 2] , UpperCAmelCase_ : Optional[int]=[8, 4, 2, 1] , UpperCAmelCase_ : Union[str, Any]=[32, 64, 160, 256] , UpperCAmelCase_ : int=[7, 3, 3, 3] , UpperCAmelCase_ : str=[4, 2, 2, 2] , UpperCAmelCase_ : List[str]=[1, 2, 5, 8] , UpperCAmelCase_ : List[Any]=[4, 4, 4, 4] , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Optional[Any]=1E-6 , UpperCAmelCase_ : List[str]=256 , UpperCAmelCase_ : Dict=255 , **UpperCAmelCase_ : Dict , ):
super().__init__(**UpperCAmelCase_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
" removed, as the behaviour will default to that of reshape_last_stage = True." , UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : Dict = num_channels
SCREAMING_SNAKE_CASE : Any = num_encoder_blocks
SCREAMING_SNAKE_CASE : Tuple = depths
SCREAMING_SNAKE_CASE : Any = sr_ratios
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_sizes
SCREAMING_SNAKE_CASE : int = patch_sizes
SCREAMING_SNAKE_CASE : Optional[int] = strides
SCREAMING_SNAKE_CASE : Tuple = mlp_ratios
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : int = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = classifier_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Dict = drop_path_rate
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : Any = decoder_hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.get("reshape_last_stage" , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Dict = version.parse('''1.11''' )
@property
def _A ( self : Optional[Any] ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _A ( self : List[str] ):
return 1E-4
@property
def _A ( self : Any ):
return 12
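# ---------------------------------------------------------------------------
# A small sketch of the per-stage layout the SegFormer config above encodes:
# every stage-indexed hyperparameter must have `num_encoder_blocks` entries.
# The helper and its defaults are hypothetical and only mirror the __init__
# defaults shown above.
def check_segformer_stage_lists(num_encoder_blocks=4,
                                depths=(2, 2, 2, 2),
                                sr_ratios=(8, 4, 2, 1),
                                hidden_sizes=(32, 64, 160, 256)) -> bool:
    # Each tuple describes one value per encoder stage.
    return all(len(v) == num_encoder_blocks for v in (depths, sr_ratios, hidden_sizes))

assert check_segformer_stage_lists()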
"""simple docstring"""
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : str = len(__UpperCamelCase )
UpperCAmelCase__ : str = [[0] * n for i in range(__UpperCamelCase )]
for i in range(__UpperCamelCase ):
UpperCAmelCase__ : int = y_points[i]
for i in range(2 , __UpperCamelCase ):
for j in range(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
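# ---------------------------------------------------------------------------
# The obfuscated function above is Neville's interpolation with its local
# variable names lost. Below is a runnable reconstruction under that
# assumption: column 1 of q holds the raw samples and the (x0 - x) recurrence
# fills the higher columns.
def neville_interpolate(x_points: list, y_points: list, x0: float) -> float:
    n = len(x_points)
    q = [[0.0] * n for _ in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]  # column 1 holds the raw samples
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return q[n - 1][n - 1]

# Interpolating y = x**2 through four points recovers 5.5**2 = 30.25 exactly.
assert abs(neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 5.5) - 30.25) < 1e-9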
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_lowerCAmelCase : Optional[int] = 2
class lowerCAmelCase__ :
def __init__( self : Any , *, # begin keyword-only arguments
snake_case__ : List[str]="<s>" , snake_case__ : str="<pad>" , snake_case__ : List[str]="</s>" , snake_case__ : Any="<unk>" , snake_case__ : List[Any]=None , ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = bos, unk, pad, eos
UpperCAmelCase__ : str = []
UpperCAmelCase__ : Tuple = []
UpperCAmelCase__ : Optional[int] = {}
UpperCAmelCase__ : Optional[int] = self.add_symbol(snake_case__ )
UpperCAmelCase__ : Tuple = self.add_symbol(snake_case__ )
UpperCAmelCase__ : str = self.add_symbol(snake_case__ )
UpperCAmelCase__ : Optional[int] = self.add_symbol(snake_case__ )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(snake_case__ )
UpperCAmelCase__ : List[str] = len(self.symbols )
def __eq__( self : List[Any] , snake_case__ : str ):
'''simple docstring'''
return self.indices == other.indices
def __getitem__( self : Dict , snake_case__ : Union[str, Any] ):
'''simple docstring'''
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : int ):
'''simple docstring'''
return len(self.symbols )
def __contains__( self : Dict , snake_case__ : Optional[Any] ):
'''simple docstring'''
return sym in self.indices
@classmethod
def __a ( cls : Optional[int] , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = cls()
d.add_from_file(snake_case__ )
return d
def __a ( self : Optional[Any] , snake_case__ : str , snake_case__ : Dict=1 , snake_case__ : Dict=False ):
'''simple docstring'''
if word in self.indices and not overwrite:
UpperCAmelCase__ : List[Any] = self.indices[word]
UpperCAmelCase__ : Dict = self.count[idx] + n
return idx
else:
UpperCAmelCase__ : Optional[Any] = len(self.symbols )
UpperCAmelCase__ : List[str] = idx
self.symbols.append(snake_case__ )
self.count.append(snake_case__ )
return idx
def __a ( self : Union[str, Any] , snake_case__ : List[Any] ):
'''simple docstring'''
return 0
def __a ( self : Any , snake_case__ : Dict ):
'''simple docstring'''
if isinstance(snake_case__ , snake_case__ ):
try:
with open(snake_case__ , "r" , encoding="utf-8" ) as fd:
self.add_from_file(snake_case__ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(snake_case__ ) )
return
UpperCAmelCase__ : Optional[int] = f.readlines()
UpperCAmelCase__ : Dict = self._load_meta(snake_case__ )
for line in lines[indices_start_line:]:
try:
UpperCAmelCase__ , UpperCAmelCase__ : str = line.rstrip().rsplit(" " , 1 )
if field == "#fairseq:overwrite":
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ , UpperCAmelCase__ : str = line.rsplit(" " , 1 )
else:
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Any = int(snake_case__ )
UpperCAmelCase__ : List[Any] = line
if word in self and not overwrite:
raise RuntimeError(
"Duplicate word found when loading Dictionary: '{}'. "
"Duplicate words can overwrite earlier ones by adding the "
"#fairseq:overwrite flag at the end of the corresponding row "
"in the dictionary file. If using the Camembert model, please "
"download an updated copy of the model file.".format(snake_case__ ) )
self.add_symbol(snake_case__ , n=snake_case__ , overwrite=snake_case__ )
except ValueError:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" )
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] )-> Tuple:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = dict((re.sub(r"@@$" , "" , snake_case ), v) if k.endswith("@@" ) else (re.sub(r"$" , "</w>" , snake_case ), v) for k, v in d.items() )
UpperCAmelCase__ : Optional[int] = "<s> <pad> </s> <unk>".split()
# restore the special tokens
for k in keep_keys:
del da[f'{k}</w>']
UpperCAmelCase__ : List[str] = d[k] # restore
return da
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : int )-> Union[str, Any]:
'''simple docstring'''
if not os.path.exists(snake_case ):
raise ValueError(f'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(snake_case , exist_ok=snake_case )
print(f'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
UpperCAmelCase__ : Dict = os.path.join(snake_case , "checkpoint.pt" )
if not os.path.isfile(snake_case ):
raise ValueError(f'path to the file {checkpoint_file} does not exist!' )
UpperCAmelCase__ : Optional[int] = torch.load(snake_case , map_location="cpu" )
UpperCAmelCase__ : Optional[int] = chkpt["cfg"]["model"]
# dicts
UpperCAmelCase__ : Optional[int] = os.path.join(snake_case , "dict.txt" )
if not os.path.isfile(snake_case ):
raise ValueError(f'path to the file {dict_file} does not exist!' )
UpperCAmelCase__ : Dict = Dictionary.load(snake_case )
UpperCAmelCase__ : Optional[Any] = rewrite_dict_keys(src_dict.indices )
UpperCAmelCase__ : List[str] = len(snake_case )
UpperCAmelCase__ : Tuple = os.path.join(snake_case , VOCAB_FILES_NAMES["vocab_file"] )
print(f'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(snake_case , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(snake_case , ensure_ascii=snake_case , indent=snake_case ) )
# merges_file (bpecodes)
UpperCAmelCase__ : List[Any] = os.path.join(snake_case , "bpecodes" )
if not os.path.isfile(snake_case ):
raise ValueError(f'path to the file {bpecodes_file} does not exist!' )
UpperCAmelCase__ : int = os.path.join(snake_case , VOCAB_FILES_NAMES["merges_file"] )
shutil.copyfile(snake_case , snake_case )
# model config
UpperCAmelCase__ : List[Any] = os.path.join(snake_case , "config.json" )
UpperCAmelCase__ : Any = {
"activation_dropout": args["activation_dropout"],
"architectures": ["BioGptForCausalLM"],
"attention_probs_dropout_prob": args["attention_dropout"],
"bos_token_id": 0,
"eos_token_id": 2,
"hidden_act": args["activation_fn"],
"hidden_dropout_prob": args["dropout"],
"hidden_size": args["decoder_embed_dim"],
"initializer_range": 0.02,
"intermediate_size": args["decoder_ffn_embed_dim"],
"layer_norm_eps": 1E-1_2,
"layerdrop": args["decoder_layerdrop"],
"max_position_embeddings": args["max_target_positions"],
"model_type": "biogpt",
"num_attention_heads": args["decoder_attention_heads"],
"num_hidden_layers": args["decoder_layers"],
"pad_token_id": 1,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_decoder_input_output_embed"],
"vocab_size": src_vocab_size,
}
# good hparam defaults to start with
print(f'Generating {biogpt_model_config_file}' )
with open(snake_case , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(snake_case , ensure_ascii=snake_case , indent=snake_case ) )
# tokenizer config
UpperCAmelCase__ : List[Any] = os.path.join(snake_case , snake_case )
UpperCAmelCase__ : str = {
"bos_token": "<s>",
"eos_token": "</s>",
"model_max_length": 1024,
"pad_token": "<pad>",
"special_tokens_map_file": None,
"tokenizer_class": "BioGptTokenizer",
"unk_token": "<unk>",
}
print(f'Generating {biogpt_tokenizer_config_file}' )
with open(snake_case , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(snake_case , ensure_ascii=snake_case , indent=snake_case ) )
# model
UpperCAmelCase__ : Union[str, Any] = chkpt["model"]
# remove unneeded keys
UpperCAmelCase__ : Union[str, Any] = [
"decoder.version",
]
for k in ignore_keys:
model_state_dict.pop(snake_case , snake_case )
UpperCAmelCase__ : Union[str, Any] = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("output_projection.weight" ):
UpperCAmelCase__ : int = model_state_dict.pop(snake_case )
else:
UpperCAmelCase__ : Tuple = model_state_dict.pop(snake_case )
UpperCAmelCase__ : Any = BioGptConfig.from_pretrained(snake_case )
UpperCAmelCase__ : List[Any] = BioGptForCausalLM(snake_case )
# check that it loads ok
model_new.load_state_dict(snake_case )
# save
UpperCAmelCase__ : Union[str, Any] = os.path.join(snake_case , snake_case )
print(f'Generating {pytorch_weights_dump_path}' )
torch.save(snake_case , snake_case )
print("Conversion is done!" )
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCAmelCase : List[str] = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
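# ---------------------------------------------------------------------------
# A standalone sketch of the key-rewriting step used in the conversion above:
# word-internal fairseq BPE pieces keep their text (the trailing "@@" is
# stripped), word-final pieces get an explicit "</w>" marker, and the special
# tokens are restored afterwards. Illustrative only; not the exact helper.
import re

def rewrite_bpe_dict_keys(d: dict) -> dict:
    out = {
        (re.sub(r"@@$", "", k) if k.endswith("@@") else k + "</w>"): v
        for k, v in d.items()
    }
    for special in "<s> <pad> </s> <unk>".split():
        marked = f"{special}</w>"
        if marked in out:
            out[special] = out.pop(marked)  # undo the marker on special tokens
    return out

assert rewrite_bpe_dict_keys({"<s>": 0, "hel@@": 5, "lo": 6}) == {"<s>": 0, "hel": 5, "lo</w>": 6}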
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase : Optional[int] = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[int] = ['''ViTFeatureExtractor''']
__lowerCamelCase : Dict = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[str] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
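# ---------------------------------------------------------------------------
# The module above defers heavy imports until first attribute access via
# transformers' _LazyModule. A minimal sketch of the same idea using PEP 562
# module-level __getattr__ (illustrative; this is not the transformers
# implementation, and the attribute map below is made up):
import importlib

_LAZY_ATTRS = {"sqrt": "math"}  # attribute name -> module that provides it

def __getattr__(name):
    # Called by Python only when `name` is not found in the module namespace.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")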
import functools
def lowercase__ ( __A: list[int] ,__A: list[int] ):
'''simple docstring'''
if not isinstance(__A ,__A ) or not all(isinstance(__A ,__A ) for day in days ):
raise ValueError('''The parameter days should be a list of integers''' )
if len(__A ) != 3 or not all(isinstance(__A ,__A ) for cost in costs ):
raise ValueError('''The parameter costs should be a list of three integers''' )
if len(__A ) == 0:
return 0
if min(__A ) <= 0:
raise ValueError('''All days elements should be greater than 0''' )
if max(__A ) >= 3_6_6:
raise ValueError('''All days elements should be less than 366''' )
__magic_name__ : Tuple = set(__A )
@functools.cache
def dynamic_programming(__A: int ) -> int:
if index > 3_6_5:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) ,costs[1] + dynamic_programming(index + 7 ) ,costs[2] + dynamic_programming(index + 3_0 ) ,)
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
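# ---------------------------------------------------------------------------
# Runnable reconstruction of the memoised travel-pass recursion above (the
# obfuscation collapsed its parameter names): for every travel day choose the
# cheapest of a 1-, 7- or 30-day pass. Assumes day numbers lie in [1, 365].
import functools

def mincost_tickets(days: list, costs: list) -> int:
    days_set = set(days)

    @functools.cache
    def dp(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dp(index + 1)       # no travel: skip to the next day
        return min(
            costs[0] + dp(index + 1),  # 1-day pass
            costs[1] + dp(index + 7),  # 7-day pass
            costs[2] + dp(index + 30), # 30-day pass
        )

    return dp(1)

assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11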
lowerCamelCase : int = 8.3_1_4_4_6_2 # Unit - J mol-1 K-1
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case ):
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case ):
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
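# ---------------------------------------------------------------------------
# De-obfuscated, runnable form of the two helpers above (PV = nRT solved for
# pressure and for volume), with the same validation behaviour. Names are
# reconstructed from context.
UNIVERSAL_GAS_CONSTANT = 8.314462  # J mol^-1 K^-1

def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume

def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure

assert round(pressure_of_gas_system(2, 100, 5), 2) == 332.58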
import numpy as np
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case = 1E-12 , __snake_case = 100 , ):
assert np.shape(__snake_case )[0] == np.shape(__snake_case )[1]
# Ensure proper dimensionality.
assert np.shape(__snake_case )[0] == np.shape(__snake_case )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(__snake_case ) == np.iscomplexobj(__snake_case )
__lowerCAmelCase = np.iscomplexobj(__snake_case )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(__snake_case , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
__lowerCAmelCase = False
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 1E12
while not convergence:
# Multiple matrix by the vector.
__lowerCAmelCase = np.dot(__snake_case , __snake_case )
# Normalize the resulting output vector.
__lowerCAmelCase = w / np.linalg.norm(__snake_case )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
__lowerCAmelCase = vector.conj().T if is_complex else vector.T
__lowerCAmelCase = np.dot(__snake_case , np.dot(__snake_case , __snake_case ) )
# Check convergence.
__lowerCAmelCase = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
__lowerCAmelCase = True
__lowerCAmelCase = lambda_
if is_complex:
__lowerCAmelCase = np.real(lambda_ )
return lambda_, vector
def __lowerCAmelCase ( ):
__lowerCAmelCase = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
__lowerCAmelCase = np.array([41, 4, 20] )
__lowerCAmelCase = real_input_matrix.astype(np.complex128 )
__lowerCAmelCase = np.triu(1J * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
__lowerCAmelCase = np.array([41, 4, 20] ).astype(np.complex128 )
for problem_type in ["real", "complex"]:
if problem_type == "real":
__lowerCAmelCase = real_input_matrix
__lowerCAmelCase = real_vector
elif problem_type == "complex":
__lowerCAmelCase = complex_input_matrix
__lowerCAmelCase = complex_vector
# Our implementation.
__lowerCAmelCase , __lowerCAmelCase = power_iteration(__snake_case , __snake_case )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
__lowerCAmelCase , __lowerCAmelCase = np.linalg.eigh(__snake_case )
# Last eigenvalue is the maximum one.
__lowerCAmelCase = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
__lowerCAmelCase = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(__snake_case ) - np.abs(__snake_case ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
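# ---------------------------------------------------------------------------
# Quick numeric sanity check of the idea the routine above implements
# (hypothetical usage written out by hand, since the obfuscated names prevent
# calling it directly): the dominant eigenpair found by repeated
# multiplication matches numpy's eigh for a symmetric matrix.
import numpy as np

a = np.array([[2.0, 1.0], [1.0, 2.0]])
b = np.array([1.0, 0.0])
for _ in range(50):          # power iteration by hand
    b = a @ b
    b /= np.linalg.norm(b)
lam = b @ a @ b              # Rayleigh quotient of the normalized vector
assert abs(lam - np.linalg.eigh(a)[0][-1]) < 1e-9  # dominant eigenvalue is 3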
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__magic_name__ : Any = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = ['''audio_values''', '''audio_mask''']
def __init__( self , lowerCamelCase=2_048 , lowerCamelCase=1 , lowerCamelCase=[16, 16] , lowerCamelCase=128 , lowerCamelCase=44_100 , lowerCamelCase=86 , lowerCamelCase=2_048 , lowerCamelCase=0.0 , **lowerCamelCase , ):
super().__init__(
feature_size=lowerCamelCase , sampling_rate=lowerCamelCase , padding_value=lowerCamelCase , **lowerCamelCase , )
lowercase = spectrogram_length
lowercase = num_channels
lowercase = patch_size
lowercase = feature_size // self.patch_size[1]
lowercase = n_fft
lowercase = sampling_rate // hop_length_to_sampling_rate
lowercase = sampling_rate
lowercase = padding_value
lowercase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCamelCase , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=lowerCamelCase , norm="slaney" , mel_scale="slaney" , ).T
def UpperCamelCase( self , lowerCamelCase ):
lowercase = spectrogram(
lowerCamelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
lowercase = log_spec[:, :-1]
lowercase = log_spec - 20.0
lowercase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = False , **lowerCamelCase , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowercase = isinstance(lowerCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
lowercase = is_batched_numpy or (
isinstance(lowerCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase , np.ndarray ):
lowercase = np.asarray(lowerCamelCase , dtype=np.float32 )
elif isinstance(lowerCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
lowercase = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
lowercase = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowercase = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCamelCase ):
lowercase = [np.asarray(lowerCamelCase , dtype=np.float32 ) for feature in audio_features]
# Create audio attention mask
lowercase = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowercase = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowercase = np.array(lowerCamelCase ).astype(np.float32 )
# convert into correct format for padding
lowercase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowercase = np.ones([len(lowerCamelCase ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
lowercase = padded_audio_features * self.padding_value
for i in range(len(lowerCamelCase ) ):
lowercase = audio_features[i]
lowercase = feature
# return as BatchFeature
if return_attention_mask:
lowercase = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
lowercase = {"audio_values": padded_audio_features}
lowercase = BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
return encoded_inputs
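# ---------------------------------------------------------------------------
# Sketch of the audio-mask bookkeeping above: a spectrogram of T frames and F
# mel bins is cut into patch_size tiles, and the mask flags real vs. padded
# patches. Pure arithmetic with illustrative names; not the extractor's API.
from math import ceil

def audio_patch_mask(num_frames: int, max_patch_len: int,
                     patch_size=(16, 16), feature_size=128) -> list:
    freq_len = feature_size // patch_size[1]              # patches per frame row
    n_real = ceil(num_frames / patch_size[0]) * freq_len  # patches with content
    return [1] * n_real + [0] * (max_patch_len - n_real)

mask = audio_patch_mask(num_frames=40, max_patch_len=32)
assert sum(mask) == 24 and len(mask) == 32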
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__magic_name__ : Union[str, Any] = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__magic_name__ : int = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = numpy.dtype(numpy.uint32 ).newbyteorder(">" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=SCREAMING_SNAKE_CASE__ )[0]
@deprecated(SCREAMING_SNAKE_CASE__ , "Please use tf.data to implement this functionality." )
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE__ ) as bytestream:
_snake_case = _readaa(SCREAMING_SNAKE_CASE__ )
if magic != 20_51:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
_snake_case = _readaa(SCREAMING_SNAKE_CASE__ )
_snake_case = _readaa(SCREAMING_SNAKE_CASE__ )
_snake_case = _readaa(SCREAMING_SNAKE_CASE__ )
_snake_case = bytestream.read(rows * cols * num_images )
_snake_case = numpy.frombuffer(SCREAMING_SNAKE_CASE__ , dtype=numpy.uint8 )
_snake_case = data.reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
return data
@deprecated(SCREAMING_SNAKE_CASE__ , "Please use tf.one_hot on tensors." )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = labels_dense.shape[0]
_snake_case = numpy.arange(SCREAMING_SNAKE_CASE__ ) * num_classes
_snake_case = numpy.zeros((num_labels, num_classes) )
_snake_case = 1
return labels_one_hot
@deprecated(SCREAMING_SNAKE_CASE__ , "Please use tf.data to implement this functionality." )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=10 ):
'''simple docstring'''
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=SCREAMING_SNAKE_CASE__ ) as bytestream:
_snake_case = _readaa(SCREAMING_SNAKE_CASE__ )
if magic != 20_49:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
_snake_case = _readaa(SCREAMING_SNAKE_CASE__ )
_snake_case = bytestream.read(SCREAMING_SNAKE_CASE__ )
_snake_case = numpy.frombuffer(SCREAMING_SNAKE_CASE__ , dtype=numpy.uint8 )
if one_hot:
return _dense_to_one_hot(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return labels
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
@deprecated(
lowerCamelCase , "Please use alternatives such as official/mnist/_DataSet.py"
" from tensorflow/models." , )
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=dtypes.float32 , lowerCamelCase=True , lowerCamelCase=None , ):
_snake_case , _snake_case = random_seed.get_seed(lowerCamelCase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
_snake_case = dtypes.as_dtype(lowerCamelCase ).base_dtype
if dtype not in (dtypes.uint8, dtypes.float32):
raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
if fake_data:
_snake_case = 10_000
_snake_case = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F'''images.shape: {images.shape} labels.shape: {labels.shape}'''
_snake_case = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
_snake_case = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.float32:
# Convert from [0, 255] -> [0.0, 1.0].
_snake_case = images.astype(numpy.float32 )
_snake_case = numpy.multiply(lowerCamelCase , 1.0 / 255.0 )
_snake_case = images
_snake_case = labels
_snake_case = 0
_snake_case = 0
@property
def UpperCamelCase( self ):
return self._images
@property
def UpperCamelCase( self ):
return self._labels
@property
def UpperCamelCase( self ):
return self._num_examples
@property
def UpperCamelCase( self ):
return self._epochs_completed
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=True ):
if fake_data:
_snake_case = [1] * 784
_snake_case = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(lowerCamelCase )],
[fake_label for _ in range(lowerCamelCase )],
)
_snake_case = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
_snake_case = numpy.arange(self._num_examples )
numpy.random.shuffle(lowerCamelCase )
_snake_case = self.images[perma]
_snake_case = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
_snake_case = self._num_examples - start
_snake_case = self._images[start : self._num_examples]
_snake_case = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
_snake_case = numpy.arange(self._num_examples )
numpy.random.shuffle(lowerCamelCase )
_snake_case = self.images[perm]
_snake_case = self.labels[perm]
# Start next epoch
_snake_case = 0
_snake_case = batch_size - rest_num_examples
_snake_case = self._index_in_epoch
_snake_case = self._images[start:end]
_snake_case = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
_snake_case = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(SCREAMING_SNAKE_CASE__ , "Please write your own downloading logic." )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if not gfile.Exists(SCREAMING_SNAKE_CASE__ ):
gfile.MakeDirs(SCREAMING_SNAKE_CASE__ )
_snake_case = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not gfile.Exists(SCREAMING_SNAKE_CASE__ ):
urllib.request.urlretrieve(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # noqa: S310
with gfile.GFile(SCREAMING_SNAKE_CASE__ ) as f:
_snake_case = f.size()
print("Successfully downloaded" , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , "bytes." )
return filepath
@deprecated(
SCREAMING_SNAKE_CASE__ , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=dtypes.float32 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=50_00 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=DEFAULT_SOURCE_URL , ):
'''simple docstring'''
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ , seed=SCREAMING_SNAKE_CASE__ )
_snake_case = fake()
_snake_case = fake()
_snake_case = fake()
return _Datasets(train=SCREAMING_SNAKE_CASE__ , validation=SCREAMING_SNAKE_CASE__ , test=SCREAMING_SNAKE_CASE__ )
if not source_url: # empty string check
_snake_case = DEFAULT_SOURCE_URL
_snake_case = "train-images-idx3-ubyte.gz"
_snake_case = "train-labels-idx1-ubyte.gz"
_snake_case = "t10k-images-idx3-ubyte.gz"
_snake_case = "t10k-labels-idx1-ubyte.gz"
_snake_case = _maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + train_images_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , "rb" ) as f:
_snake_case = _extract_images(SCREAMING_SNAKE_CASE__ )
_snake_case = _maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + train_labels_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , "rb" ) as f:
_snake_case = _extract_labels(SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ )
_snake_case = _maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + test_images_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , "rb" ) as f:
_snake_case = _extract_images(SCREAMING_SNAKE_CASE__ )
_snake_case = _maybe_download(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , source_url + test_labels_file )
with gfile.Open(SCREAMING_SNAKE_CASE__ , "rb" ) as f:
_snake_case = _extract_labels(SCREAMING_SNAKE_CASE__ , one_hot=SCREAMING_SNAKE_CASE__ )
if not 0 <= validation_size <= len(SCREAMING_SNAKE_CASE__ ):
_snake_case = (
"Validation size should be between 0 and "
f'''{len(SCREAMING_SNAKE_CASE__ )}. Received: {validation_size}.'''
)
raise ValueError(SCREAMING_SNAKE_CASE__ )
_snake_case = train_images[:validation_size]
_snake_case = train_labels[:validation_size]
_snake_case = train_images[validation_size:]
_snake_case = train_labels[validation_size:]
_snake_case = {"dtype": dtype, "reshape": reshape, "seed": seed}
_snake_case = _DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
_snake_case = _DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
_snake_case = _DataSet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
return _Datasets(train=SCREAMING_SNAKE_CASE__ , validation=SCREAMING_SNAKE_CASE__ , test=SCREAMING_SNAKE_CASE__ )
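# ---------------------------------------------------------------------------
# Compact reconstruction of the one-hot encoding helper used above, written
# with fancy indexing instead of the original's flat-index trick. Illustrative
# only; the deprecated original writes through labels_one_hot.flat.
import numpy as np

def dense_to_one_hot(labels_dense: np.ndarray, num_classes: int) -> np.ndarray:
    one_hot = np.zeros((labels_dense.shape[0], num_classes), dtype=np.float32)
    one_hot[np.arange(labels_dense.shape[0]), labels_dense] = 1.0
    return one_hot

assert dense_to_one_hot(np.array([1, 0, 2]), 3).tolist() == [
    [0.0, 1.0, 0.0],
    [1.0, 0.0, 0.0],
    [0.0, 0.0, 1.0],
]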
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class __UpperCAmelCase ( __lowerCAmelCase ):
A__ : List[Any] = '''Wav2Vec2FeatureExtractor'''
A__ : Any = '''AutoTokenizer'''
def __init__( self , _lowerCamelCase , _lowerCamelCase ):
super().__init__(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ =self.feature_extractor
lowerCamelCase__ =False
@classmethod
def _a ( cls , _lowerCamelCase , **_lowerCamelCase ):
try:
return super().from_pretrained(_lowerCamelCase , **_lowerCamelCase )
except OSError:
warnings.warn(
F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: " , _lowerCamelCase , )
lowerCamelCase__ =Wav2Vec2FeatureExtractor.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
lowerCamelCase__ =Wav2Vec2CTCTokenizer.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
return cls(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
def __call__( self , *_lowerCamelCase , **_lowerCamelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowerCamelCase , **_lowerCamelCase )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
lowerCamelCase__ =kwargs.pop("raw_speech" )
else:
lowerCamelCase__ =kwargs.pop("audio" , _lowerCamelCase )
lowerCamelCase__ =kwargs.pop("sampling_rate" , _lowerCamelCase )
lowerCamelCase__ =kwargs.pop("text" , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
lowerCamelCase__ =args[0]
lowerCamelCase__ =args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
lowerCamelCase__ =self.feature_extractor(_lowerCamelCase , *_lowerCamelCase , sampling_rate=_lowerCamelCase , **_lowerCamelCase )
if text is not None:
lowerCamelCase__ =self.tokenizer(_lowerCamelCase , **_lowerCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCamelCase__ =encodings["input_ids"]
return inputs
def _a ( self , *_lowerCamelCase , **_lowerCamelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*_lowerCamelCase , **_lowerCamelCase )
lowerCamelCase__ =kwargs.pop("input_features" , _lowerCamelCase )
lowerCamelCase__ =kwargs.pop("labels" , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
lowerCamelCase__ =args[0]
lowerCamelCase__ =args[1:]
if input_features is not None:
lowerCamelCase__ =self.feature_extractor.pad(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
if labels is not None:
lowerCamelCase__ =self.tokenizer.pad(_lowerCamelCase , **_lowerCamelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
lowerCamelCase__ =labels["input_ids"]
return input_features
def _a ( self , *_lowerCamelCase , **_lowerCamelCase ):
return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase )
def _a ( self , *_lowerCamelCase , **_lowerCamelCase ):
return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase )
@contextmanager
def _a ( self ):
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
lowerCamelCase__ =True
lowerCamelCase__ =self.tokenizer
yield
lowerCamelCase__ =self.feature_extractor
lowerCamelCase__ =False
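# ---------------------------------------------------------------------------
# The deprecated context manager above temporarily swaps which sub-processor
# handles __call__. A generic miniature of the same pattern (hypothetical
# classes and callables, not the transformers API):
from contextlib import contextmanager

class DualProcessor:
    def __init__(self, feature_extractor, tokenizer):
        self.feature_extractor = feature_extractor
        self.tokenizer = tokenizer
        self.current_processor = feature_extractor

    @contextmanager
    def as_target_processor(self):
        self.current_processor = self.tokenizer  # route calls to the tokenizer
        try:
            yield
        finally:
            self.current_processor = self.feature_extractor  # always restore

    def __call__(self, x):
        return self.current_processor(x)

p = DualProcessor(str.upper, str.lower)
assert p("Hi") == "HI"
with p.as_target_processor():
    assert p("Hi") == "hi"
assert p("Hi") == "HI"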
| 530 | """simple docstring"""
from __future__ import annotations
from typing import Any
class __UpperCAmelCase :
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0 ):
lowerCamelCase__ , lowerCamelCase__ =row, column
lowerCamelCase__ =[[default_value for c in range(_lowerCamelCase )] for r in range(_lowerCamelCase )]
def __str__( self ):
lowerCamelCase__ =F'''Matrix consists of {self.row} rows and {self.column} columns\n'''
# Make string identifier
lowerCamelCase__ =0
for row_vector in self.array:
for obj in row_vector:
lowerCamelCase__ =max(_lowerCamelCase , len(str(_lowerCamelCase ) ) )
lowerCamelCase__ =F'''%{max_element_length}s'''
# Make string and return
def single_line(_lowerCamelCase ) -> str:
nonlocal string_format_identifier
lowerCamelCase__ ="["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(_lowerCamelCase ) for row_vector in self.array )
return s
def __repr__( self ):
return str(self )
def _a ( self , _lowerCamelCase ):
if not (isinstance(_lowerCamelCase , (list, tuple) ) and len(_lowerCamelCase ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self , _lowerCamelCase ):
assert self.validate_indicies(_lowerCamelCase )
return self.array[loc[0]][loc[1]]
def __setitem__( self , _lowerCamelCase , _lowerCamelCase ):
assert self.validate_indicies(_lowerCamelCase )
lowerCamelCase__ =value
def __add__( self , _lowerCamelCase ):
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert self.row == another.row and self.column == another.column
# Add
lowerCamelCase__ =Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCamelCase__ =self[r, c] + another[r, c]
return result
def __neg__( self ):
lowerCamelCase__ =Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCamelCase__ =-self[r, c]
return result
def __sub__( self , _lowerCamelCase ):
return self + (-another)
def __mul__( self , _lowerCamelCase ):
if isinstance(_lowerCamelCase , (int, float) ): # Scalar multiplication
lowerCamelCase__ =Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCamelCase__ =self[r, c] * another
return result
elif isinstance(_lowerCamelCase , _lowerCamelCase ): # Matrix multiplication
assert self.column == another.row
lowerCamelCase__ =Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
lowerCamelCase__ =F'''Unsupported type given for another ({type(_lowerCamelCase )})'''
raise TypeError(_lowerCamelCase )
def _a ( self ):
lowerCamelCase__ =Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
lowerCamelCase__ =self[r, c]
return result
def _a ( self , _lowerCamelCase , _lowerCamelCase ):
assert isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
lowerCamelCase__ =v.transpose()
lowerCamelCase__ =(v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def lowerCamelCase_ ( ) -> None:
'''simple docstring'''
lowerCamelCase__ =Matrix(3 , 3 , 0 )
for i in range(3 ):
lowerCamelCase__ =1
print(F'''a^(-1) is {ainv}''' )
# u, v
lowerCamelCase__ =Matrix(3 , 1 , 0 )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ =1, 2, -3
lowerCamelCase__ =Matrix(3 , 1 , 0 )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ =4, -2, 5
print(F'''u is {u}''' )
print(F'''v is {v}''' )
print(F'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(__lowerCAmelCase , __lowerCAmelCase )}''' )
def lowerCamelCase_ ( ) -> None:
'''simple docstring'''
import doctest
doctest.testmod()
testa()
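# ---------------------------------------------------------------------------
# Numeric check of the Sherman-Morrison identity the method above implements:
# (A + u v^T)^-1 == A^-1 - (A^-1 u v^T A^-1) / (1 + v^T A^-1 u).
# Uses numpy directly; the matrices below are arbitrary illustrative data.
import numpy as np

rng = np.random.default_rng(0)
a = np.eye(3) + 0.1 * rng.standard_normal((3, 3))
u = rng.standard_normal((3, 1))
v = rng.standard_normal((3, 1))
a_inv = np.linalg.inv(a)
denom = 1.0 + (v.T @ a_inv @ u).item()
update = a_inv - (a_inv @ u @ v.T @ a_inv) / denom
assert np.allclose(update, np.linalg.inv(a + u @ v.T))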
'''simple docstring'''
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class snake_case__ ( TensorFormatter[Mapping, "torch.Tensor", Mapping]):
def __init__( self : List[str] , _A : str=None , **_A : Dict ) -> Optional[Any]:
super().__init__(features=_A )
UpperCAmelCase_ : Dict = torch_tensor_kwargs
import torch # noqa import torch at initialization
def A ( self : List[str] , _A : List[str] ) -> List[Any]:
import torch
if isinstance(_A , _A ) and column:
if all(
isinstance(_A , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(_A )
return column
def A ( self : int , _A : str ) -> Optional[int]:
import torch
if isinstance(_A , (str, bytes, type(None )) ):
return value
elif isinstance(_A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
UpperCAmelCase_ : int = {}
if isinstance(_A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
UpperCAmelCase_ : Optional[Any] = {'''dtype''': torch.int64}
elif isinstance(_A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
UpperCAmelCase_ : Optional[int] = {'''dtype''': torch.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_A , PIL.Image.Image ):
UpperCAmelCase_ : Optional[int] = np.asarray(_A )
return torch.tensor(_A , **{**default_dtype, **self.torch_tensor_kwargs} )
def A ( self : Any , _A : List[Any] ) -> Union[str, Any]:
import torch
# support for torch, tf, jax etc.
if hasattr(_A , '''__array__''' ) and not isinstance(_A , torch.Tensor ):
UpperCAmelCase_ : int = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_A , np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
elif isinstance(_A , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
return self._tensorize(_A )
def A ( self : List[Any] , _A : dict ) -> Any:
return map_nested(self._recursive_tensorize , _A , map_list=_A )
def A ( self : Optional[Any] , _A : pa.Table ) -> Mapping:
UpperCAmelCase_ : List[str] = self.numpy_arrow_extractor().extract_row(_A )
UpperCAmelCase_ : List[Any] = self.python_features_decoder.decode_row(_A )
return self.recursive_tensorize(_A )
def A ( self : Any , _A : pa.Table ) -> "torch.Tensor":
UpperCAmelCase_ : int = self.numpy_arrow_extractor().extract_column(_A )
UpperCAmelCase_ : Any = self.python_features_decoder.decode_column(_A , pa_table.column_names[0] )
UpperCAmelCase_ : str = self.recursive_tensorize(_A )
UpperCAmelCase_ : Union[str, Any] = self._consolidate(_A )
return column
def A ( self : List[str] , _A : pa.Table ) -> Mapping:
UpperCAmelCase_ : Optional[int] = self.numpy_arrow_extractor().extract_batch(_A )
UpperCAmelCase_ : Tuple = self.python_features_decoder.decode_batch(_A )
UpperCAmelCase_ : Dict = self.recursive_tensorize(_A )
for column_name in batch:
UpperCAmelCase_ : int = self._consolidate(batch[column_name] )
return batch
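# ---------------------------------------------------------------------------
# Sketch of the column "consolidation" rule above: a list of equally shaped,
# same-dtype tensors is stacked into one tensor, anything else is returned
# unchanged. Requires torch; names are illustrative.
import torch

def consolidate(column):
    if column and all(
        isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
        for x in column
    ):
        return torch.stack(column)
    return column

assert consolidate([torch.zeros(2), torch.ones(2)]).shape == (2, 2)
assert isinstance(consolidate([torch.zeros(2), torch.zeros(3)]), list)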
'''simple docstring'''
class snake_case__ :
def __init__( self : Dict , _A : int ) -> Tuple:
UpperCAmelCase_ : List[str] = n
UpperCAmelCase_ : Optional[Any] = [None] * self.n
UpperCAmelCase_ : List[str] = 0 # index of the first element
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : List[Any] = 0
def __len__( self : Optional[int] ) -> int:
return self.size
def A ( self : List[Any] ) -> bool:
return self.size == 0
def A ( self : str ) -> Dict:
return False if self.is_empty() else self.array[self.front]
def A ( self : Any , _A : int ) -> List[str]:
if self.size >= self.n:
raise Exception('''QUEUE IS FULL''' )
UpperCAmelCase_ : Dict = data
UpperCAmelCase_ : List[str] = (self.rear + 1) % self.n
self.size += 1
return self
def A ( self : Optional[int] ) -> str:
if self.size == 0:
raise Exception('''UNDERFLOW''' )
UpperCAmelCase_ : Dict = self.array[self.front]
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Dict = (self.front + 1) % self.n
self.size -= 1
return temp
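# ---------------------------------------------------------------------------
# De-obfuscated, runnable version of the ring buffer above (its methods lost
# their names to the obfuscation; is_empty, enqueue and dequeue are shown):
class CircularQueue:
    def __init__(self, n: int):
        self.n = n
        self.array = [None] * n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def is_empty(self) -> bool:
        return self.size == 0

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp, self.array[self.front] = self.array[self.front], None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp

q = CircularQueue(3)
q.enqueue(1).enqueue(2)
assert q.dequeue() == 1 and q.dequeue() == 2 and q.is_empty()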
_lowerCamelCase : Union[str, Any] = [0, 2, 4, 6, 8]
_lowerCamelCase : List[Any] = [1, 3, 5, 7, 9]
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int:
"""simple docstring"""
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
A__ = 0
for digit in range(10 ):
A__ = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , lowercase_ , lowercase_ )
return result
A__ = 0
for digita in range(10 ):
A__ = digita
if (remainder + digita) % 2 == 0:
A__ = ODD_DIGITS
else:
A__ = EVEN_DIGITS
for digita in other_parity_digits:
A__ = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , lowercase_ , lowercase_ , )
return result
def SCREAMING_SNAKE_CASE ( lowercase_ = 9 ) -> int:
"""simple docstring"""
A__ = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(lowercase_ , 0 , [0] * length , lowercase_ )
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
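# ---------------------------------------------------------------------------
# Brute-force cross-check of the digit-DP above: a number is "reversible"
# (Project Euler 145) when it has no trailing zero and n + reverse(n) has only
# odd digits. Counting directly for small n validates the recursion; the
# problem statement gives 120 reversible numbers below one thousand.
def is_reversible(n: int) -> bool:
    if n % 10 == 0:  # reversed form would have a leading zero
        return False
    total = n + int(str(n)[::-1])
    return all(int(d) % 2 == 1 for d in str(total))

assert sum(is_reversible(n) for n in range(1, 1000)) == 120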
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class __magic_name__ ( __UpperCAmelCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = "informer"
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
    def __init__( self , prediction_length: Optional[int] = None , context_length: Optional[int] = None , distribution_output: str = "student_t" , loss: str = "nll" , input_size: int = 1 , lags_sequence: List[int] = None , scaling: Optional[Union[str, bool]] = "mean" , num_dynamic_real_features: int = 0 , num_static_categorical_features: int = 0 , num_static_real_features: int = 0 , num_time_features: int = 0 , cardinality: Optional[List[int]] = None , embedding_dimension: Optional[List[int]] = None , d_model: int = 64 , encoder_ffn_dim: int = 32 , decoder_ffn_dim: int = 32 , encoder_attention_heads: int = 2 , decoder_attention_heads: int = 2 , encoder_layers: int = 2 , decoder_layers: int = 2 , is_encoder_decoder: bool = True , activation_function: str = "gelu" , dropout: float = 0.05 , encoder_layerdrop: float = 0.1 , decoder_layerdrop: float = 0.1 , attention_dropout: float = 0.1 , activation_dropout: float = 0.1 , num_parallel_samples: int = 100 , init_std: float = 0.02 , use_cache: bool = True , attention_type: str = "prob" , sampling_factor: int = 5 , distil: bool = True , **kwargs , ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def _number_of_features( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 234 | 0 |
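# A minimal usage sketch for the configuration class above, assuming it is the
# renamed copy of `InformerConfig` shipped in `transformers`:
from transformers import InformerConfig

config = InformerConfig(prediction_length=24, context_length=48)
print(config.d_model)        # 64 by default
print(config.lags_sequence)  # [1, 2, 3, 4, 5, 6, 7] by default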
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : str = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
A : Tuple = dict(zip(lowerCamelCase__, range(len(lowerCamelCase__ ) ) ) )
A : int = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
A : Dict = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_6000,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
A : Dict = tempfile.mkdtemp()
A : List[str] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
A : List[Any] = os.path.join(self.tmpdirname, lowerCamelCase__ )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + """\n""" )
with open(self.feature_extraction_file, """w""", encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + """\n""" )
# load decoder from hub
A : int = """hf-internal-testing/ngram-beam-search-decoder"""
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
A : Union[str, Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(lowerCamelCase__ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase__ )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **lowerCamelCase__ )
def _lowerCAmelCase ( self, **lowerCamelCase__ ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **lowerCamelCase__ )
def _lowerCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_tokenizer()
A : Optional[int] = self.get_feature_extractor()
A : Union[str, Any] = self.get_decoder()
A : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__, feature_extractor=lowerCamelCase__, decoder=lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
A : Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase__ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor, lowerCamelCase__ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, )
self.assertIsInstance(processor.decoder, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
A : str = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha, 5.0 )
self.assertEqual(processor.language_model.beta, 3.0 )
self.assertEqual(processor.language_model.score_boundary, -7.0 )
self.assertEqual(processor.language_model.unk_score_offset, 3 )
def _lowerCAmelCase ( self ):
A : Tuple = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(lowerCamelCase__, """include""" ):
WavaVecaProcessorWithLM(
tokenizer=lowerCamelCase__, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder() )
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_feature_extractor()
A : Any = self.get_tokenizer()
A : Dict = self.get_decoder()
A : Dict = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__, feature_extractor=lowerCamelCase__, decoder=lowerCamelCase__ )
A : Union[str, Any] = floats_list((3, 1000) )
A : int = feature_extractor(lowerCamelCase__, return_tensors="""np""" )
A : Union[str, Any] = processor(lowerCamelCase__, return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def _lowerCAmelCase ( self ):
A : int = self.get_feature_extractor()
A : List[Any] = self.get_tokenizer()
A : str = self.get_decoder()
A : int = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__, feature_extractor=lowerCamelCase__, decoder=lowerCamelCase__ )
A : List[str] = """This is a test string"""
A : Optional[Any] = processor(text=lowerCamelCase__ )
A : str = tokenizer(lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def _lowerCAmelCase ( self, lowerCamelCase__=(2, 10, 16), lowerCamelCase__=77 ):
np.random.seed(lowerCamelCase__ )
return np.random.rand(*lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : int = self.get_feature_extractor()
A : Optional[Any] = self.get_tokenizer()
A : str = self.get_decoder()
A : List[Any] = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__, feature_extractor=lowerCamelCase__, decoder=lowerCamelCase__ )
A : str = self._get_dummy_logits(shape=(10, 16), seed=13 )
A : Tuple = processor.decode(lowerCamelCase__ )
A : List[Any] = decoder.decode_beams(lowerCamelCase__ )[0]
self.assertEqual(decoded_decoder[0], decoded_processor.text )
self.assertEqual("""</s> <s> </s>""", decoded_processor.text )
self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Optional[Any] = self.get_feature_extractor()
A : List[str] = self.get_tokenizer()
A : str = self.get_decoder()
A : int = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__, feature_extractor=lowerCamelCase__, decoder=lowerCamelCase__ )
A : List[str] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
A : Dict = processor.batch_decode(lowerCamelCase__ )
else:
with get_context(lowerCamelCase__ ).Pool() as pool:
A : Dict = processor.batch_decode(lowerCamelCase__, lowerCamelCase__ )
A : Optional[int] = list(lowerCamelCase__ )
with get_context("""fork""" ).Pool() as p:
A : List[str] = decoder.decode_beams_batch(lowerCamelCase__, lowerCamelCase__ )
        texts_decoder , logit_scores_decoder , lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(lowerCamelCase__, decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""], decoded_processor.text )
self.assertListEqual(lowerCamelCase__, decoded_processor.logit_score )
self.assertListEqual(lowerCamelCase__, decoded_processor.lm_score )
def _lowerCAmelCase ( self ):
A : Tuple = self.get_feature_extractor()
A : Union[str, Any] = self.get_tokenizer()
A : Any = self.get_decoder()
A : Tuple = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__, feature_extractor=lowerCamelCase__, decoder=lowerCamelCase__ )
A : Optional[int] = self._get_dummy_logits()
A : Tuple = 15
A : List[Any] = -20.0
A : Any = -4.0
A : Union[str, Any] = processor.batch_decode(
lowerCamelCase__, beam_width=lowerCamelCase__, beam_prune_logp=lowerCamelCase__, token_min_logp=lowerCamelCase__, )
A : str = decoded_processor_out.text
A : List[Any] = list(lowerCamelCase__ )
with get_context("""fork""" ).Pool() as pool:
A : int = decoder.decode_beams_batch(
lowerCamelCase__, lowerCamelCase__, beam_width=lowerCamelCase__, beam_prune_logp=lowerCamelCase__, token_min_logp=lowerCamelCase__, )
A : Any = [d[0][0] for d in decoded_decoder_out]
A : Tuple = [d[0][2] for d in decoded_decoder_out]
A : str = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""], lowerCamelCase__ )
self.assertTrue(np.array_equal(lowerCamelCase__, decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447], lowerCamelCase__, atol=1e-3 ) )
self.assertTrue(np.array_equal(lowerCamelCase__, decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474], lowerCamelCase__, atol=1e-3 ) )
def _lowerCAmelCase ( self ):
A : int = self.get_feature_extractor()
A : List[str] = self.get_tokenizer()
A : List[str] = self.get_decoder()
A : Tuple = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__, feature_extractor=lowerCamelCase__, decoder=lowerCamelCase__ )
A : Dict = self._get_dummy_logits()
A : List[Any] = 2.0
A : Tuple = 5.0
A : Any = -20.0
A : List[str] = True
A : Optional[int] = processor.batch_decode(
lowerCamelCase__, alpha=lowerCamelCase__, beta=lowerCamelCase__, unk_score_offset=lowerCamelCase__, lm_score_boundary=lowerCamelCase__, )
A : Optional[int] = decoded_processor_out.text
A : Optional[Any] = list(lowerCamelCase__ )
decoder.reset_params(
alpha=lowerCamelCase__, beta=lowerCamelCase__, unk_score_offset=lowerCamelCase__, lm_score_boundary=lowerCamelCase__, )
with get_context("""fork""" ).Pool() as pool:
A : str = decoder.decode_beams_batch(
lowerCamelCase__, lowerCamelCase__, )
A : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""], lowerCamelCase__ )
A : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha, 2.0 )
self.assertEqual(lm_model.beta, 5.0 )
self.assertEqual(lm_model.unk_score_offset, -20.0 )
self.assertEqual(lm_model.score_boundary, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[int] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
A : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
A : str = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
A : Union[str, Any] = os.listdir(lowerCamelCase__ )
A : Any = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Any = snapshot_download("""hf-internal-testing/processor_with_lm""" )
A : List[Any] = WavaVecaProcessorWithLM.from_pretrained(lowerCamelCase__ )
A : Any = processor.decoder.model_container[processor.decoder._model_key]
A : str = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
A : int = os.listdir(lowerCamelCase__ )
A : List[Any] = os.listdir(lowerCamelCase__ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
A : int = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
A : Union[str, Any] = floats_list((3, 1000) )
A : Optional[int] = processor_wavaveca(lowerCamelCase__, return_tensors="""np""" )
A : List[Any] = processor_auto(lowerCamelCase__, return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2 )
A : List[str] = self._get_dummy_logits()
A : Dict = processor_wavaveca.batch_decode(lowerCamelCase__ )
A : Union[str, Any] = processor_auto.batch_decode(lowerCamelCase__ )
self.assertListEqual(decoded_wavaveca.text, decoded_auto.text )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.get_feature_extractor()
A : Optional[Any] = self.get_tokenizer()
A : Tuple = self.get_decoder()
A : str = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase__, feature_extractor=lowerCamelCase__, decoder=lowerCamelCase__ )
self.assertListEqual(
processor.model_input_names, feature_extractor.model_input_names, msg="""`processor` and `feature_extractor` model input names do not match""", )
@staticmethod
def _lowerCAmelCase ( lowerCamelCase__, lowerCamelCase__ ):
A : str = [d[key] for d in offsets]
return retrieved_list
def _lowerCAmelCase ( self ):
A : List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
A : Dict = self._get_dummy_logits()[0]
A : Tuple = processor.decode(lowerCamelCase__, output_word_offsets=lowerCamelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(lowerCamelCase__, lowerCamelCase__ ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""], """word""" ) ), outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""], """word""" ), ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""], """start_offset""" ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""], """end_offset""" ), [1, 3, 5] )
def _lowerCAmelCase ( self ):
A : int = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
A : Tuple = self._get_dummy_logits()
A : int = processor.batch_decode(lowerCamelCase__, output_word_offsets=lowerCamelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ), 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(lowerCamelCase__, lowerCamelCase__ ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(lowerCamelCase__, """word""" ) ) for o in outputs["""word_offsets"""]], outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0], """word""" ), ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0], """start_offset""" ), [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0], """end_offset""" ), [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _lowerCAmelCase ( self ):
import torch
A : Optional[Any] = load_dataset("""common_voice""", """en""", split="""train""", streaming=lowerCamelCase__ )
A : int = ds.cast_column("""audio""", datasets.Audio(sampling_rate=1_6000 ) )
A : int = iter(lowerCamelCase__ )
A : Dict = next(lowerCamelCase__ )
A : Union[str, Any] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
A : List[Any] = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
A : Dict = processor(sample["""audio"""]["""array"""], return_tensors="""pt""" ).input_values
with torch.no_grad():
A : Tuple = model(lowerCamelCase__ ).logits.cpu().numpy()
A : List[str] = processor.decode(logits[0], output_word_offsets=lowerCamelCase__ )
A : Any = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
A : Union[str, Any] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
A : Optional[Any] = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(lowerCamelCase__, """word""" ) ), lowerCamelCase__ )
self.assertEqual(""" """.join(self.get_from_offsets(lowerCamelCase__, """word""" ) ), output.text )
# output times
A : str = torch.tensor(self.get_from_offsets(lowerCamelCase__, """start_time""" ) )
A : Optional[Any] = torch.tensor(self.get_from_offsets(lowerCamelCase__, """end_time""" ) )
# fmt: off
A : Union[str, Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
A : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=0.01 ) )
self.assertTrue(torch.allclose(lowerCamelCase__, lowerCamelCase__, atol=0.01 ) )
| 520 |
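# A standalone sketch of the offset-to-time arithmetic exercised by the slow
# test above: CTC word offsets are counted in model frames, so each offset is
# scaled by inputs_to_logits_ratio / sampling_rate to obtain seconds. The
# ratio 320 is an assumption (the usual Wav2Vec2 downsampling factor).
inputs_to_logits_ratio = 320
sampling_rate = 16_000
time_offset = inputs_to_logits_ratio / sampling_rate  # 0.02 s per frame

word_offsets = [{"word": "HELLO", "start_offset": 71, "end_offset": 77}]
word_times = [
    {
        "word": d["word"],
        "start_time": d["start_offset"] * time_offset,  # ~1.42 s
        "end_time": d["end_offset"] * time_offset,      # ~1.54 s
    }
    for d in word_offsets
]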
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest ( BertTokenizationTest ):
    '''simple docstring'''
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
    @slow
    def test_sequence_builders ( self ):
        tokenizer = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
        text = tokenizer.encode("""sequence builders""", add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""", add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 520 | 1 |
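# A plain-list sketch of the [CLS]/[SEP] layout that the test above asserts,
# with hypothetical token ids (101/102 are the usual BERT special-token ids):
cls_id, sep_id = 101, 102
text, text_a = [7592, 2088], [2009, 2003]
encoded_sentence = [cls_id] + text + [sep_id]
encoded_pair = [cls_id] + text + [sep_id] + text_a + [sep_id]
assert encoded_pair == [101, 7592, 2088, 102, 2009, 2003, 102]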
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    '''simple docstring'''
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__( self ) -> None:
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
    def resolution( self ) -> torch.Tensor:
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32 ) )
    def fov( self ) -> torch.Tensor:
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32 ) )
    def get_image_coords( self ) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width )
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc" ),
            ], axis=1, )
        return coords
    @property
    def camera_rays( self ) -> torch.Tensor:
        batch_size , *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape ) )
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0 ), [batch_size * inner_batch_size, *coords.shape] )
        rays = self.get_camera_rays(coords )
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3 )
        return rays
    def get_camera_rays( self, coords: torch.Tensor ) -> torch.Tensor:
        batch_size , *shape , n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size, -1, 2 )
        res = self.resolution()
        fov = self.fov()
        # map pixel coordinates to [-1, 1] and scale by the field of view
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2 )
        fracs = fracs.view(batch_size, -1, 2 )
        directions = (
            self.z.view(batch_size, 1, 3 )
            + self.x.view(batch_size, 1, 3 ) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3 ) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True )
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3 ), [batch_size, directions.shape[1], 3] ),
                directions,
            ], dim=2, )
        return rays.view(batch_size, *shape, 2, 3 )
    def resize_image( self, width: int, height: int ) -> "DifferentiableProjectiveCamera":
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin, x=self.x, y=self.y, z=self.z, width=width, height=height, x_fov=self.x_fov, y_fov=self.y_fov, shape=self.shape, )
def create_pan_cameras( size ):
    '''simple docstring'''
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0 , 2 * np.pi , num=20 ):
        z = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        origin = -z * 4
        x = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
        y = np.cross(z , x )
        origins.append(origin )
        xs.append(x )
        ys.append(y )
        zs.append(z )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins , axis=0 ) ).float() , x=torch.from_numpy(np.stack(xs , axis=0 ) ).float() , y=torch.from_numpy(np.stack(ys , axis=0 ) ).float() , z=torch.from_numpy(np.stack(zs , axis=0 ) ).float() , width=size , height=size , x_fov=0.7 , y_fov=0.7 , shape=(1, len(xs )) , )
| 625 |
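# A hedged usage sketch for the helper above: it builds 20 cameras orbiting
# the origin, and the `camera_rays` property yields stacked (origin, direction)
# pairs for every pixel of every camera.
cameras = create_pan_cameras(64)
rays = cameras.camera_rays                      # shape (1, 20 * 64 * 64, 2, 3)
origins, directions = rays[..., 0, :], rays[..., 1, :]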
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor ( ProcessorMixin ):
    '''simple docstring'''
    feature_extractor_class = 'EncodecFeatureExtractor'
    tokenizer_class = ('T5Tokenizer', 'T5TokenizerFast')
    def __init__( self, feature_extractor, tokenizer ):
        super().__init__(feature_extractor, tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids ( self, task=None, language=None, no_timestamps=True ):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps )
    def __call__( self, *args, **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs )
        audio = kwargs.pop("audio", None )
        sampling_rate = kwargs.pop("sampling_rate", None )
        text = kwargs.pop("text", None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if text is not None:
            inputs = self.tokenizer(text, **kwargs )
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs )
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs
    def batch_decode ( self, *args, **kwargs ):
        audio_values = kwargs.pop("audio", None )
        padding_mask = kwargs.pop("padding_mask", None )
        if len(args ) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask )
        else:
            return self.tokenizer.batch_decode(*args, **kwargs )
    def decode ( self, *args, **kwargs ):
        return self.tokenizer.decode(*args, **kwargs )
    def _decode_audio ( self, audio_values, padding_mask = None ) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values )
        bsz , channels , seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values )
        padding_mask = to_numpy(padding_mask )
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value )
        audio_values = audio_values.tolist()
        for i in range(bsz ):
            sliced_audio = np.asarray(audio_values[i] )[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1 )
        return audio_values
| 625 | 1 |
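# A standalone numpy sketch of the padding-trim step in `_decode_audio` above:
# positions where the mask equals the padding value are dropped per waveform.
import numpy as np

padding_value = 0
audio = np.array([[[0.1, 0.2, 0.3, 0.0, 0.0]]])  # (bsz=1, channels=1, seq_len=5)
mask = np.array([[1, 1, 1, 0, 0]])               # 0 marks padded positions
trimmed = audio[0][:, mask[0] != padding_value]  # keeps the first three samples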
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule ( scheduler , num_steps=10 ):
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs

def unwrap_and_save_reload_schedule ( scheduler , num_steps=10 ):
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , """schedule.bin""" )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class __A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__ )
UpperCamelCase__ = torch.tensor([0.4, 0.2, -0.5] )
UpperCamelCase__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
UpperCamelCase__ = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(1_00 ):
UpperCamelCase__ = criterion(UpperCamelCase__ , UpperCamelCase__ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__ )
UpperCamelCase__ = torch.tensor([0.4, 0.2, -0.5] )
UpperCamelCase__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
UpperCamelCase__ = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase__ , weight_decay=0.0 , relative_step=UpperCamelCase__ , scale_parameter=UpperCamelCase__ , warmup_init=UpperCamelCase__ , )
for _ in range(10_00 ):
UpperCamelCase__ = criterion(UpperCamelCase__ , UpperCamelCase__ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class __A( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = nn.Linear(50 , 50 ) if is_torch_available() else None
SCREAMING_SNAKE_CASE__ = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
SCREAMING_SNAKE_CASE__ = 10
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__ , msg=UpperCamelCase__ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = {"""num_warmup_steps""": 2, """num_training_steps""": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
UpperCamelCase__ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"""num_warmup_steps""": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, """num_cycles""": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, """power""": 2.0, """lr_end""": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"""num_warmup_steps""": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
UpperCamelCase__ , UpperCamelCase__ = data
UpperCamelCase__ = scheduler_func(self.optimizer , **UpperCamelCase__ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
UpperCamelCase__ = unwrap_schedule(UpperCamelCase__ , self.num_steps )
self.assertListAlmostEqual(
UpperCamelCase__ , UpperCamelCase__ , tol=1E-2 , msg=F"failed for {scheduler_func} in normal scheduler" , )
UpperCamelCase__ = scheduler_func(self.optimizer , **UpperCamelCase__ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase__ ) # wrap to test picklability of the schedule
UpperCamelCase__ = unwrap_and_save_reload_schedule(UpperCamelCase__ , self.num_steps )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ , msg=F"failed for {scheduler_func} in save and reload" )
class __A:
"""simple docstring"""
    def __init__(self , fn ):
        self.fn = fn
    def __call__(self , *args , **kwargs ):
        return self.fn(*args , **kwargs )
    @classmethod
    def wrap_scheduler (cls , scheduler ):
        # wrap each lr lambda so the schedule object becomes picklable
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
| 710 |
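# A minimal end-to-end sketch of the warmup schedule exercised above, using
# the public names (`get_linear_schedule_with_warmup` from `transformers`):
import torch
from torch import nn
from transformers import get_linear_schedule_with_warmup

model = nn.Linear(50, 50)
optimizer = torch.optim.AdamW(model.parameters(), lr=10.0)
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=2, num_training_steps=10
)
lrs = []
for _ in range(10):
    lrs.append(scheduler.get_last_lr()[0])
    optimizer.step()
    scheduler.step()
# lrs ramps 0.0 -> 10.0 over the two warmup steps, then decays linearly,
# matching the expected values in the test table above.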
from sklearn.metrics import matthews_corrcoef
import datasets
lowerCamelCase_ = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
lowerCamelCase_ = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
lowerCamelCase_ = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A( datasets.Metric ):
"""simple docstring"""
    def _info (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute (self , predictions , references , sample_weight=None ):
        return {
            "matthews_correlation": float(matthews_corrcoef(references , predictions , sample_weight=sample_weight ) ),
        }
| 86 | 0 |
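# The metric above is a thin wrapper over scikit-learn, so Example 1 from its
# docstring can be reproduced directly:
from sklearn.metrics import matthews_corrcoef

refs = [1, 3, 2, 0, 3, 2]
preds = [1, 2, 2, 0, 3, 3]
print(round(matthews_corrcoef(refs, preds), 2))  # 0.54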
'''simple docstring'''
from __future__ import annotations
solution = []

def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    # check the current row, the column, and the two upper diagonals
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True

def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0  # backtrack
    return False

def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('''Q''', end=''' ''' )
            else:
                print('''.''', end=''' ''' )
        print()

# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
| 467 |
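# A quick sanity check for the solver above (a sketch): the 4-queens puzzle
# has exactly two solutions, a standard reference value. Note that `solve`
# prints each board as it is found.
solution.clear()
solve([[0] * 4 for _ in range(4)], 0)
assert len(solution) == 2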
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UpperCAmelCase :
def __init__( self : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple=9_9 , __lowerCamelCase : int=1_3 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Dict=9 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[str]=False , __lowerCamelCase : List[Any]=3_2 , __lowerCamelCase : int=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Tuple=3_7 , __lowerCamelCase : Any=8 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Any=0.0_02 , __lowerCamelCase : List[str]=1 , __lowerCamelCase : str=0 , __lowerCamelCase : str=0 , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=None , ):
UpperCAmelCase__ :Tuple = parent
UpperCAmelCase__ :str = batch_size
UpperCAmelCase__ :int = encoder_seq_length
UpperCAmelCase__ :Optional[int] = decoder_seq_length
# For common tests
UpperCAmelCase__ :int = self.decoder_seq_length
UpperCAmelCase__ :List[Any] = is_training
UpperCAmelCase__ :Any = use_attention_mask
UpperCAmelCase__ :Tuple = use_labels
UpperCAmelCase__ :Optional[int] = vocab_size
UpperCAmelCase__ :Optional[Any] = hidden_size
UpperCAmelCase__ :Optional[Any] = num_hidden_layers
UpperCAmelCase__ :Tuple = num_attention_heads
UpperCAmelCase__ :str = d_ff
UpperCAmelCase__ :Tuple = relative_attention_num_buckets
UpperCAmelCase__ :int = dropout_rate
UpperCAmelCase__ :Dict = initializer_factor
UpperCAmelCase__ :int = eos_token_id
UpperCAmelCase__ :Tuple = pad_token_id
UpperCAmelCase__ :Tuple = decoder_start_token_id
UpperCAmelCase__ :List[str] = None
UpperCAmelCase__ :List[str] = decoder_layers
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return TaConfig.from_pretrained('''google/umt5-base''' )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : List[Any]=None , ):
if attention_mask is None:
UpperCAmelCase__ :Optional[Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
UpperCAmelCase__ :Any = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
UpperCAmelCase__ :List[str] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__lowerCamelCase )
if decoder_head_mask is None:
UpperCAmelCase__ :Optional[Any] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__lowerCamelCase )
if cross_attn_head_mask is None:
UpperCAmelCase__ :List[Any] = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
UpperCAmelCase__ :Optional[int] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
UpperCAmelCase__ :List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
UpperCAmelCase__ :int = input_ids.clamp(self.pad_token_id + 1 )
UpperCAmelCase__ :Tuple = decoder_input_ids.clamp(self.pad_token_id + 1 )
UpperCAmelCase__ :Dict = self.get_config()
UpperCAmelCase__ :Union[str, Any] = config.num_attention_heads
UpperCAmelCase__ :Dict = self.prepare_inputs_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, input_dict
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
UpperCAmelCase__ , UpperCAmelCase__ :int = self.prepare_config_and_inputs()
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return TaConfig(
vocab_size=1_6_6 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , ):
UpperCAmelCase__ :str = UMTaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
UpperCAmelCase__ :Optional[int] = model(
input_ids=__lowerCamelCase , decoder_input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase , decoder_attention_mask=__lowerCamelCase , )
UpperCAmelCase__ :List[Any] = model(input_ids=__lowerCamelCase , decoder_input_ids=__lowerCamelCase )
UpperCAmelCase__ :Dict = result.last_hidden_state
UpperCAmelCase__ :Tuple = result.past_key_values
UpperCAmelCase__ :Dict = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__lowerCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , ):
UpperCAmelCase__ :Tuple = UMTaModel(config=__lowerCamelCase ).get_decoder().to(__lowerCamelCase ).eval()
# first forward pass
UpperCAmelCase__ :Dict = model(__lowerCamelCase , use_cache=__lowerCamelCase )
UpperCAmelCase__ :Any = model(__lowerCamelCase )
UpperCAmelCase__ :List[str] = model(__lowerCamelCase , use_cache=__lowerCamelCase )
self.parent.assertTrue(len(__lowerCamelCase ) == len(__lowerCamelCase ) )
self.parent.assertTrue(len(__lowerCamelCase ) == len(__lowerCamelCase ) + 1 )
UpperCAmelCase__ , UpperCAmelCase__ :int = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase__ :int = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
UpperCAmelCase__ :Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase__ :Union[str, Any] = model(__lowerCamelCase )['''last_hidden_state''']
UpperCAmelCase__ :str = model(__lowerCamelCase , past_key_values=__lowerCamelCase )['''last_hidden_state''']
# select random slice
UpperCAmelCase__ :Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase__ :Any = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCAmelCase__ :Union[str, Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
def __SCREAMING_SNAKE_CASE ( self : Any , __lowerCamelCase : Tuple , __lowerCamelCase : str , ):
UpperCAmelCase__ :Optional[Any] = UMTaModel(config=__lowerCamelCase ).to(__lowerCamelCase ).half().eval()
UpperCAmelCase__ :Any = model(**__lowerCamelCase )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(__lowerCamelCase ).any().item() )
@require_torch
class UpperCAmelCase ( _snake_case , _snake_case , _snake_case , unittest.TestCase ):
UpperCAmelCase = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
UpperCAmelCase = (UMTaForConditionalGeneration,) if is_torch_available() else ()
UpperCAmelCase = (
{
"conversational": UMTaForConditionalGeneration,
"feature-extraction": UMTaModel,
"summarization": UMTaForConditionalGeneration,
"text2text-generation": UMTaForConditionalGeneration,
"translation": UMTaForConditionalGeneration,
"question-answering": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = True
UpperCAmelCase = True
# The small UMT5 model needs higher percentages for CPU/MP tests
UpperCAmelCase = [0.8, 0.9]
def __SCREAMING_SNAKE_CASE ( self : str ):
UpperCAmelCase__ :Optional[Any] = UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def __SCREAMING_SNAKE_CASE ( self : int ):
UpperCAmelCase__ :int = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase__ :Optional[int] = UMTaModel(config_and_inputs[0] ).to(__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__lowerCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=__lowerCamelCase , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
UpperCAmelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__lowerCamelCase )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
UpperCAmelCase__ :Tuple = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
UpperCAmelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase__ :int = config_and_inputs[0]
UpperCAmelCase__ :Optional[Any] = UMTaForConditionalGeneration(__lowerCamelCase ).eval()
model.to(__lowerCamelCase )
UpperCAmelCase__ :List[str] = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=__lowerCamelCase ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__lowerCamelCase ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__lowerCamelCase ),
}
for attn_name, (name, mask) in zip(__lowerCamelCase , head_masking.items() ):
UpperCAmelCase__ :Union[str, Any] = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
UpperCAmelCase__ :Optional[Any] = torch.ones(
config.num_decoder_layers , config.num_heads , device=__lowerCamelCase )
UpperCAmelCase__ :Union[str, Any] = model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=__lowerCamelCase , return_dict_in_generate=__lowerCamelCase , **__lowerCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
UpperCAmelCase__ :List[Any] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def __SCREAMING_SNAKE_CASE ( self : Any ):
UpperCAmelCase__ :Optional[Any] = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=__lowerCamelCase ).to(__lowerCamelCase )
UpperCAmelCase__ :List[str] = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=__lowerCamelCase , legacy=__lowerCamelCase )
UpperCAmelCase__ :Dict = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
UpperCAmelCase__ :Optional[Any] = tokenizer(__lowerCamelCase , return_tensors='''pt''' , padding=__lowerCamelCase ).input_ids
# fmt: off
UpperCAmelCase__ :List[Any] = torch.tensor(
[
[ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5,2_7_4, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3,6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6,2_7_4, 1],
] )
# fmt: on
torch.testing.assert_allclose(__lowerCamelCase , __lowerCamelCase )
UpperCAmelCase__ :Dict = model.generate(input_ids.to(__lowerCamelCase ) )
UpperCAmelCase__ :str = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ <extra_id_56>ajลกietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajลกie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> ํผํด[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
UpperCAmelCase__ :Dict = tokenizer.batch_decode(__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
| 467 | 1 |
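# A hedged sketch of the generation flow the slow test above exercises, using
# the public model names (`UMT5ForConditionalGeneration` is the unmangled form
# of `UMTaForConditionalGeneration`):
from transformers import AutoTokenizer, UMT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
inputs = tokenizer("A <extra_id_0> walks into a bar.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.batch_decode(outputs))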
'''simple docstring'''
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations by rotating the first element out and recursing."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result

def permutea(nums: list[int]) -> list[list[int]]:
    """Return all permutations via in-place swaps and backtracking."""
    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1)
                nums[start], nums[i] = nums[i], nums[start]  # backtrack
    output = []
    backtrack(0)
    return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
    res = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 702 |
'''simple docstring'''
from pathlib import Path
import fire
def minify ( src_dir: str , dest_dir: str , n: int ):
    """Write the first n lines of every file in src_dir to dest_dir."""
    src_dir = Path(src_dir )
    dest_dir = Path(dest_dir )
    dest_dir.mkdir(exist_ok=True )
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines() )][:n]
        dest_path = dest_dir.joinpath(path.name )
        print(dest_path )
        dest_path.open('''w''' ).write('''\n'''.join(new ) )
if __name__ == "__main__":
fire.Fire(minify)
| 654 | 0 |
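# Example invocation for the Fire-wrapped script above (a usage sketch): it
# copies every file in SRC_DIR into DEST_DIR, truncated to its first N lines.
#
#   python minify.py data/full data/tiny 100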
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path) -> List[str]:
    '''simple docstring'''
    # Construct the model config, either empty or from a json file
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file )
    model = OpenAIGPTModel(config )
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
    torch.save(model.state_dict(), pytorch_weights_dump_path )
    print(F"Save configuration file to {pytorch_config_dump_path}" )
    with open(pytorch_config_dump_path, '''w''', encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--openai_checkpoint_folder_path',
default=None,
type=str,
required=True,
help='Path to the TensorFlow checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--openai_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
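# Example invocation (hypothetical paths, added for illustration only):
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path ./openai-gpt-ckpt \
#       --pytorch_dump_folder_path ./openai-gpt-pt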
| 640 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name: str) -> str:
    """Map an original (NVlab) GroupViT parameter name to its HF equivalent."""
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace('''img_encoder.pos_embed''', '''vision_model.embeddings.position_embeddings''')
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace('''img_encoder.patch_embed.proj''', '''vision_model.embeddings.patch_embeddings.projection''')
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace('''img_encoder.patch_embed.norm''', '''vision_model.embeddings.layernorm''')
    if "img_encoder.layers" in name:
        name = name.replace('''img_encoder.layers''', '''vision_model.encoder.stages''')
    if "blocks" in name and "res" not in name:
        name = name.replace('''blocks''', '''layers''')
    if "attn" in name and "pre_assign" not in name:
        name = name.replace('''attn''', '''self_attn''')
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace('''proj''', '''out_proj''')
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace('''pre_assign_attn.attn.proj''', '''pre_assign_attn.attn.out_proj''')
    if "norm1" in name:
        name = name.replace('''norm1''', '''layer_norm1''')
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace('''norm2''', '''layer_norm2''')
    if "img_encoder.norm" in name:
        name = name.replace('''img_encoder.norm''', '''vision_model.layernorm''')
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace('''text_encoder.token_embedding''', '''text_model.embeddings.token_embedding''')
    if "text_encoder.positional_embedding" in name:
        name = name.replace('''text_encoder.positional_embedding''', '''text_model.embeddings.position_embedding.weight''')
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace('''text_encoder.transformer.resblocks.''', '''text_model.encoder.layers.''')
    if "ln_1" in name:
        name = name.replace('''ln_1''', '''layer_norm1''')
    if "ln_2" in name:
        name = name.replace('''ln_2''', '''layer_norm2''')
    if "c_fc" in name:
        name = name.replace('''c_fc''', '''fc1''')
    if "c_proj" in name:
        name = name.replace('''c_proj''', '''fc2''')
    if "text_encoder" in name:
        name = name.replace('''text_encoder''', '''text_model''')
    if "ln_final" in name:
        name = name.replace('''ln_final''', '''final_layer_norm''')
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace('''img_projector.linear_hidden.''', '''visual_projection.''')
    if "img_projector.linear_out." in name:
        name = name.replace('''img_projector.linear_out.''', '''visual_projection.3.''')
    if "text_projector.linear_hidden" in name:
        name = name.replace('''text_projector.linear_hidden''', '''text_projection''')
    if "text_projector.linear_out" in name:
        name = name.replace('''text_projector.linear_out''', '''text_projection.3''')
    return name
def convert_state_dict(orig_state_dict, config):
    """Rename/split the original checkpoint weights so they match the HF model."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('''.''')
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            # NOTE: the exact destination key strings were lost in this dump; the
            # q/k/v targets below follow GroupViT's module layout and are assumptions.
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('''.''')
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            # Same caveat as above: these text-encoder targets are assumptions.
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    """Download the standard COCO cats image used for output verification."""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """Copy/paste/tweak the original GroupViT checkpoint into the HF design and verify the outputs."""
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''')
    image = prepare_img()
    inputs = processor(text=['''a photo of a cat''', '''a photo of a dog'''], images=image, padding=True, return_tensors='''pt''')

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print('''Successfully saved processor and model to''', pytorch_dump_folder_path)

    if push_to_hub:
        print('''Pushing to the hub...''')
        processor.push_to_hub(model_name, organization='''nielsr''')
        model.push_to_hub(model_name, organization='''nielsr''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
        default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the ๐ค hub using the provided `model_name`.',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
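# Example invocation (hypothetical paths, added for illustration only):
#   python convert_groupvit_nvlab_to_pytorch.py --checkpoint_path ./group_vit_gcc_yfcc.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc --model_name groupvit-gcc-yfcc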
| 640 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE: the name of this flag was lost in extraction; `test_cpu_offload`
    # follows the usual PipelineTesterMixin convention and is an assumption.
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,  # boolean restored from the upstream test (assumption)
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,  # boolean restored from the upstream test (assumption)
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
| 664 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["DeiTFeatureExtractor"]
UpperCAmelCase_ = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 664 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2SeqTrainerTester(TestCasePlus):
    """simple docstring"""

    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''', '''prajjwal1/bert-tiny''')
        tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''')

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset('''cnn_dailymail''', '''3.0.0''', split='''train[:1%]''')
        val_dataset = datasets.load_dataset('''cnn_dailymail''', '''3.0.0''', split='''validation[:1%]''')

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['''article'''], padding='''max_length''', truncation=True, max_length=512)
            outputs = tokenizer(batch['''highlights'''], padding='''max_length''', truncation=True, max_length=128)
            batch['''input_ids'''] = inputs.input_ids
            batch['''attention_mask'''] = inputs.attention_mask

            batch['''decoder_input_ids'''] = outputs.input_ids
            batch['''labels'''] = outputs.input_ids.copy()
            batch['''labels'''] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
            ]
            batch['''decoder_attention_mask'''] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=['''article''', '''highlights'''],
        )
        train_dataset.set_format(
            type='''torch''',
            columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=['''article''', '''highlights'''],
        )
        val_dataset.set_format(
            type='''torch''',
            columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy='''steps''',
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
| 30 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url, json={'text': message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            'Request to slack returned an error '
            f"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(error_message)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
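# Note (added): Slack issues a unique webhook URL per incoming-webhook
# integration; it looks like https://hooks.slack.com/services/T000/B000/XXXX
# (an illustrative, non-functional example).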
| 23 | 0 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be filled with red blocks of
    minimum length three, separated by at least one black square (Project Euler 114)."""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
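# Added sanity check (a sketch): Project Euler 114 states a row of length
# seven admits exactly seventeen ways, so solution(7) must return 17.
assert solution(7) == 17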
if __name__ == "__main__":
print(F"{solution() = }") | 304 | """simple docstring"""
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    """simple docstring"""

    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.'
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1_0_0_0] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
| 304 | 1 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("""datasets.utils.deprecation_utils._emitted_deprecation_warnings""", set())


# Used by the `list_metrics` call below
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("""datasets.inspect.huggingface_hub""", HfhMock())


@pytest.mark.parametrize(
    """func, args""", [(load_metric, ("""metrics/mse""",)), (list_metrics, ()), (inspect_metric, ("""metrics/mse""", """tmp_path"""))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != """tmp_path""" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="""https://huggingface.co/docs/evaluate"""):
        func(*args)
| 264 |
"""simple docstring"""
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # To keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("""factorial() not defined for negative values""")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    return catalan_number(node_count) * factorial(node_count)
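# Added worked example (a sketch): with node_count = 3,
# binomial_coefficient(6, 3) = 20, catalan_number(3) = 20 // 4 = 5 binary
# search trees, and binary_tree_count(3) = 5 * 3! = 30 binary trees.
assert binary_tree_count(3) == 30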
if __name__ == "__main__":
_lowerCAmelCase = int(input('Enter the number of nodes: ').strip() or 0)
if node_count <= 0:
raise ValueError('We need some nodes to work with.')
print(
F'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
F'binary trees and {catalan_number(node_count)} binary search trees.'
)
| 264 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def read_txt_into_dict(filename):
    """Read a label file into a {line_number: first_word} dictionary."""
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}'''
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''')
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'''
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """Copy/paste/tweak a fairseq wav2vec2 checkpoint into the transformers design."""
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16_000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
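# Example invocation (hypothetical paths, added for illustration only):
#   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./wav2vec_small_960h.pt --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h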
| 712 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
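# Usage sketch (added): crop a 30 s clip at 16 kHz to a random 20 s window.
#   clip = np.zeros(16_000 * 30)
#   window = random_subsample(clip, max_length=20.0)
#   assert len(window) == 16_000 * 20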
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(default=None, metadata={'help': 'Name of a dataset from the datasets package'})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_file: Optional[str] = field(
        default=None, metadata={'help': 'A file containing the training audio paths and labels.'})
    eval_file: Optional[str] = field(
        default=None, metadata={'help': 'A file containing the validation audio paths and labels.'})
    train_split_name: str = field(
        default='train', metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        },)
    eval_split_name: str = field(
        default='validation', metadata={
            'help': (
                'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
            )
        },)
    audio_column_name: str = field(
        default='audio', metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''},)
    label_column_name: str = field(
        default='label', metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },)
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },)
    max_length_seconds: float = field(
        default=20, metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'},)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        default='facebook/wav2vec2-base', metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'},)
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},)
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={'help': 'Name or path of preprocessor config.'})
    # Boolean defaults below were restored from the upstream example script (assumption).
    freeze_feature_encoder: bool = field(
        default=True, metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
    attention_mask: bool = field(
        default=True, metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
    use_auth_token: bool = field(
        default=False, metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },)
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'},)

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
        + f'''distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}'''
    )
    logger.info(f'''Training/evaluation parameters {training_args}''')

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f'''{', '.join(raw_datasets['train'].column_names)}.'''
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f'''{', '.join(raw_datasets['train'].column_names)}.'''
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    def val_transforms(batch):
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
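# Example invocation (hypothetical values, added for illustration only):
#   python run_audio_classification.py --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks --output_dir ./wav2vec2-ks \
#       --do_train --do_eval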
| 373 | 0 |
'''simple docstring'''
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1100,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith('cpu'):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 42 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize, sigma, theta, lambd, gamma, psi) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # fill in each value of the kernel
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
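

# A quick sanity check for the kernel builder (the values here are illustrative,
# not part of the original script): an even ksize is bumped to the next odd
# number so the kernel has a well-defined center pixel.
#
#   kernel = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
#   assert kernel.shape == (11, 11)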
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
A_ = imread("../image_data/lena.jpg")
# turn image in gray scale value
A_ = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
A_ = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
A_ = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
A_ = out / out.max() * 255
A_ = out.astype(np.uinta)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
| 42 | 1 |
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
    def __init__( self, generator : Callable, features : Optional[Features] = None, cache_dir : Optional[str] = None, keep_in_memory : bool = False, streaming : bool = False, gen_kwargs : Optional[dict] = None, num_proc : Optional[int] = None, **kwargs, )-> None:
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.builder =Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs, )
    def snake_case ( self ):
        # Build iterable dataset
        if self.streaming:
            dataset =self.builder.as_streaming_dataset(split='''train''' )
        # Build regular (map-style) dataset
        else:
            download_config =None
            download_mode =None
            verification_mode =None
            base_path =None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset =self.builder.as_dataset(
                split='''train''', verification_mode=verification_mode, in_memory=self.keep_in_memory )
        return dataset
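

# A minimal usage sketch (the concrete names below are illustrative stand-ins
# for the obfuscated class/method names above): the reader wraps a Python
# generator and materializes it as a map-style dataset, or as a streaming
# dataset when constructed with streaming=True.
#
#   def gen():
#       yield {"text": "hello"}
#       yield {"text": "world"}
#
#   dataset = GeneratorDatasetReader(gen).read()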
| 715 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_a = StableUnCLIPImgaImgPipeline
_a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_a = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_a = frozenset([] )
def snake_case ( self : List[str] )-> str:
        embedder_hidden_size =32
        embedder_projection_dim =32
# image encoding components
lowerCamelCase__ : Dict =CLIPImageProcessor(crop_size=32, size=32 )
torch.manual_seed(0 )
lowerCamelCase__ : List[Any] =CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase, projection_dim=lowerCamelCase, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )
# regular denoising components
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] =StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
lowerCamelCase__ : Dict =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
lowerCamelCase__ : Tuple =CLIPTextModel(
CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=lowerCamelCase, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
torch.manual_seed(0 )
lowerCamelCase__ : Dict =UNetaDConditionModel(
sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='''projection''', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=lowerCamelCase, layers_per_block=1, upcast_attention=lowerCamelCase, use_linear_projection=lowerCamelCase, )
torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] =DDIMScheduler(
beta_schedule='''scaled_linear''', beta_start=0.00_085, beta_end=0.012, prediction_type='''v_prediction''', set_alpha_to_one=lowerCamelCase, steps_offset=1, )
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] =AutoencoderKL()
lowerCamelCase__ : int ={
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def snake_case ( self : str, lowerCamelCase : Dict, lowerCamelCase : Any=0, lowerCamelCase : str=True )-> List[str]:
if str(lowerCamelCase ).startswith('''mps''' ):
lowerCamelCase__ : List[Any] =torch.manual_seed(lowerCamelCase )
else:
lowerCamelCase__ : Any =torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
lowerCamelCase__ : Dict =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
if pil_image:
lowerCamelCase__ : int =input_image * 0.5 + 0.5
lowerCamelCase__ : Dict =input_image.clamp(0, 1 )
lowerCamelCase__ : List[str] =input_image.cpu().permute(0, 2, 3, 1 ).float().numpy()
lowerCamelCase__ : Dict =DiffusionPipeline.numpy_to_pil(lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def snake_case ( self : List[str] )-> Optional[Any]:
lowerCamelCase__ : Dict ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : str =self.get_dummy_components()
lowerCamelCase__ : int =StableUnCLIPImgaImgPipeline(**lowerCamelCase )
lowerCamelCase__ : Any =sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowerCamelCase__ : Dict =self.get_dummy_inputs(lowerCamelCase )
inputs.update({'''image_embeds''': None} )
lowerCamelCase__ : Any =sd_pipe(**lowerCamelCase ).images
lowerCamelCase__ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase__ : Union[str, Any] =np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self : int )-> Tuple:
lowerCamelCase__ : Tuple =torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
def snake_case ( self : int )-> Optional[Any]:
lowerCamelCase__ : List[Any] =torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
def snake_case ( self : List[str] )-> List[str]:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : List[Any] )-> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : Optional[int] )-> int:
lowerCamelCase__ : Tuple =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
lowerCamelCase__ : Optional[int] =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
lowerCamelCase__ : Optional[Any] =StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-l-img2img''', torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase__ : int =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase__ : Any =pipe(lowerCamelCase, '''anime turtle''', generator=lowerCamelCase, output_type='''np''' )
lowerCamelCase__ : List[Any] =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
def snake_case ( self : Optional[int] )-> Tuple:
lowerCamelCase__ : Any =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
lowerCamelCase__ : str =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
lowerCamelCase__ : Optional[int] =StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase__ : str =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowerCamelCase__ : Tuple =pipe(lowerCamelCase, '''anime turtle''', generator=lowerCamelCase, output_type='''np''' )
lowerCamelCase__ : Tuple =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
def snake_case ( self : Optional[int] )-> List[str]:
lowerCamelCase__ : int =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase__ : Any =StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''', torch_dtype=torch.floataa )
lowerCamelCase__ : Optional[Any] =pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase__ : List[Any] =pipe(
lowerCamelCase, '''anime turtle''', num_inference_steps=2, output_type='''np''', )
lowerCamelCase__ : Optional[int] =torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 625 | 0 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class lowercase__:
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple=1_3 , SCREAMING_SNAKE_CASE_ : Optional[Any]=7 , SCREAMING_SNAKE_CASE_ : Dict=6 , SCREAMING_SNAKE_CASE_ : int=1_7 , SCREAMING_SNAKE_CASE_ : str=2_3 , SCREAMING_SNAKE_CASE_ : Dict=1_1 , SCREAMING_SNAKE_CASE_ : List[str]=True , ) -> Any:
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = act_dim
lowercase_ = state_dim
lowercase_ = hidden_size
lowercase_ = max_length
lowercase_ = is_training
def _lowercase ( self : str ) -> Optional[Any]:
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 )
lowercase_ = random_attention_mask((self.batch_size, self.seq_length) )
lowercase_ = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def _lowercase ( self : int ) -> List[Any]:
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , ) -> Dict:
lowercase_ = DecisionTransformerModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length * 3 as there are 3 modalities: states, returns and actions
def _lowercase ( self : Union[str, Any] ) -> Dict:
lowercase_ = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) = config_and_inputs
lowercase_ = {
'''states''': states,
'''actions''': actions,
'''rewards''': rewards,
'''returns_to_go''': returns_to_go,
'''timesteps''': timesteps,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_torch
class lowercase__( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a :Dict = (DecisionTransformerModel,) if is_torch_available() else ()
a :int = ()
a :Optional[Any] = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}
    # Ignore a failing test from GenerationTesterMixin, as the model does not use input_ids
a :List[str] = False
    # Ignore failing tests from ModelTesterMixin, as the model does not implement these features
a :Optional[int] = False
a :Optional[int] = False
a :Union[str, Any] = False
a :Optional[Any] = False
a :Optional[int] = False
a :Optional[Any] = False
a :Any = False
a :Union[str, Any] = False
a :Tuple = False
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
lowercase_ = DecisionTransformerModelTester(self )
lowercase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=3_7 )
def _lowercase ( self : Any ) -> str:
self.config_tester.run_common_tests()
def _lowercase ( self : Dict ) -> int:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
@slow
def _lowercase ( self : List[str] ) -> Union[str, Any]:
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = DecisionTransformerModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[Any] ) -> int:
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = [
'''states''',
'''actions''',
'''rewards''',
'''returns_to_go''',
'''timesteps''',
'''attention_mask''',
]
self.assertListEqual(arg_names[: len(SCREAMING_SNAKE_CASE_ )] , SCREAMING_SNAKE_CASE_ )
@require_torch
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : Optional[Any] ) -> str:
lowercase_ = 2 # number of steps of autoregressive prediction we will perform
lowercase_ = 1_0 # defined by the RL environment, may be normalized
lowercase_ = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' )
lowercase_ = model.to(SCREAMING_SNAKE_CASE_ )
lowercase_ = model.config
torch.manual_seed(0 )
lowercase_ = torch.randn(1 , 1 , config.state_dim ).to(device=SCREAMING_SNAKE_CASE_ , dtype=torch.floataa ) # env.reset()
lowercase_ = torch.tensor(
[[0.24_27_93, -0.28_69_30_74, 0.8_74_26_13], [0.67_81_52_74, -0.08_10_10_85, -0.12_95_21_47]] , device=SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.tensor(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
lowercase_ = state
lowercase_ = torch.zeros(1 , 0 , config.act_dim , device=SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
lowercase_ = torch.zeros(1 , 0 , device=SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
lowercase_ = torch.tensor(0 , device=SCREAMING_SNAKE_CASE_ , dtype=torch.long ).reshape(1 , 1 )
for step in range(SCREAMING_SNAKE_CASE_ ):
lowercase_ = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=SCREAMING_SNAKE_CASE_ )] , dim=1 )
lowercase_ = torch.cat([rewards, torch.zeros(1 , 1 , device=SCREAMING_SNAKE_CASE_ )] , dim=1 )
lowercase_ = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
lowercase_ , lowercase_ , lowercase_ = model(
states=SCREAMING_SNAKE_CASE_ , actions=SCREAMING_SNAKE_CASE_ , rewards=SCREAMING_SNAKE_CASE_ , returns_to_go=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=SCREAMING_SNAKE_CASE_ , dtype=torch.floataa ),
1.0,
False,
{},
)
lowercase_ = action_pred[0, -1]
lowercase_ = torch.cat([states, state] , dim=1 )
lowercase_ = returns_to_go[0, -1] - reward
lowercase_ = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
lowercase_ = torch.cat(
[timesteps, torch.ones((1, 1) , device=SCREAMING_SNAKE_CASE_ , dtype=torch.long ) * (step + 1)] , dim=1 )
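
        # Each pass of the loop above autoregressively extends the context: the
        # predicted action fills the zero placeholder appended at the top of the
        # loop, a new (here random, standing in for env.step) state is appended,
        # and the return-to-go is decremented by the received reward before the
        # next forward pass -- the standard Decision Transformer rollout.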
| 97 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class lowerCamelCase__ :
def _UpperCAmelCase ( self , snake_case , snake_case , snake_case ) -> Dict:
"""simple docstring"""
return None
class lowerCamelCase__ :
def _UpperCAmelCase ( self , snake_case , snake_case , snake_case , snake_case ) -> List[str]:
"""simple docstring"""
return None
class lowerCamelCase__ ( unittest.TestCase ):
__UpperCAmelCase = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(snake_case , """tf""" , 1_2 , **snake_case )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(snake_case , """pt""" , 1_2 , **snake_case )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
from transformers import BertModel
lowercase : str = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(snake_case ) )
vocab_file.flush()
lowercase : Any = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowercase : Any = BertModel(BertConfig(vocab_size=len(snake_case ) ) )
model.save_pretrained(snake_case )
self._test_export(snake_case , """pt""" , 1_2 , snake_case )
@require_tf
@slow
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase : int = self._test_export(snake_case , """tf""" , 1_2 , **snake_case )
lowercase : str = quantize(Path(snake_case ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(snake_case ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase : int = self._test_export(snake_case , """pt""" , 1_2 , **snake_case )
lowercase : Dict = quantize(snake_case )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(snake_case ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def _UpperCAmelCase ( self , snake_case , snake_case , snake_case , snake_case=None , **snake_case ) -> List[str]:
"""simple docstring"""
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowercase : Union[str, Any] = Path(snake_case ).joinpath("""model.onnx""" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(snake_case , snake_case , snake_case , snake_case , snake_case , **snake_case )
return path
except Exception as e:
self.fail(snake_case )
@require_torch
@require_tokenizers
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
from transformers import BertModel
lowercase : List[Any] = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowercase : int = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(snake_case , snake_case , """pt""" )
@require_tf
@require_tokenizers
@slow
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
from transformers import TFBertModel
lowercase : Dict = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
lowercase : Any = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(snake_case , snake_case , """tf""" )
def _UpperCAmelCase ( self , snake_case , snake_case , snake_case ) -> List[Any]:
"""simple docstring"""
lowercase : Tuple = FeatureExtractionPipeline(snake_case , snake_case )
lowercase : str = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
lowercase , lowercase , lowercase , lowercase : Dict = infer_shapes(snake_case , snake_case )
# Assert all variables are present
self.assertEqual(len(snake_case ) , len(snake_case ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , snake_case )
self.assertSequenceEqual(variable_names[3:] , snake_case )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
lowercase : Union[str, Any] = ["""input_ids""", """attention_mask""", """token_type_ids"""]
lowercase : List[Any] = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
lowercase , lowercase : int = ensure_valid_input(FuncContiguousArgs() , snake_case , snake_case )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(snake_case ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(snake_case ) , set(snake_case ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(snake_case , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowercase , lowercase : List[Any] = ensure_valid_input(FuncNonContiguousArgs() , snake_case , snake_case )
        # Should keep exactly one arg (everything before the missing "some_other_args")
self.assertEqual(len(snake_case ) , 1 )
self.assertEqual(len(snake_case ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
lowercase : Optional[int] = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
| 607 | 0 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCamelCase : List[Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    # Randomly sample a chunk of `max_length` seconds from the input audio.
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
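

# Behavior sketch for the helper above (the values are illustrative): with
# sample_rate=16_000 and max_length=1.0, a 2-second clip is cut to a random
# 16_000-sample window, while anything at or under 1 second is returned as-is.
#
#   wav = np.zeros(32_000)
#   random_subsample(wav, max_length=1.0, sample_rate=16_000).shape  # -> (16000,)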
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_UpperCamelCase = field(default=a ,metadata={'help': 'Name of a dataset from the datasets package'} )
_UpperCamelCase = field(
default=a ,metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
_UpperCamelCase = field(
default=a ,metadata={'help': 'A file containing the training audio paths and labels.'} )
_UpperCamelCase = field(
default=a ,metadata={'help': 'A file containing the validation audio paths and labels.'} )
_UpperCamelCase = field(
default='train' ,metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} ,)
_UpperCamelCase = field(
default='validation' ,metadata={
'help': (
                'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} ,)
_UpperCamelCase = field(
default='audio' ,metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} ,)
_UpperCamelCase = field(
default='label' ,metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''} )
_UpperCamelCase = field(
default=a ,metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} ,)
_UpperCamelCase = field(
default=a ,metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} ,)
_UpperCamelCase = field(
default=20 ,metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} ,)
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_UpperCamelCase = field(
default='facebook/wav2vec2-base' ,metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ,)
_UpperCamelCase = field(
default=a ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_UpperCamelCase = field(
default=a ,metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'} )
_UpperCamelCase = field(
default='main' ,metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} ,)
_UpperCamelCase = field(
default=a ,metadata={'help': 'Name or path of preprocessor config.'} )
_UpperCamelCase = field(
default=a ,metadata={'help': 'Whether to freeze the feature encoder layers of the model.'} )
_UpperCamelCase = field(
default=a ,metadata={'help': 'Whether to generate an attention mask in the feature extractor.'} )
_UpperCamelCase = field(
default=a ,metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} ,)
_UpperCamelCase = field(
default=a ,metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
_UpperCamelCase = field(
default=a ,metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} ,)
def UpperCamelCase_ ( self ):
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """will be removed in a future version. Use `--freeze_feature_encoder` """
                """instead. Setting `freeze_feature_encoder==True`.""" ,FutureWarning ,)
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""should not be used in combination with `--freeze_feature_encoder`."""
"""Only make use of `--freeze_feature_encoder`.""" )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_audio_classification""" , __lowerCAmelCase , __lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase__ = training_args.get_process_log_level()
logger.setLevel(__lowerCAmelCase )
transformers.utils.logging.set_verbosity(__lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
lowerCamelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
lowerCamelCase__ = DatasetDict()
lowerCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F'''{", ".join(raw_datasets["train"].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"""Make sure to set `--label_column_name` to the correct text column - one of """
F'''{", ".join(raw_datasets["train"].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
lowerCamelCase__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
lowerCamelCase__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
lowerCamelCase__ = feature_extractor.model_input_names[0]
def train_transforms(__lowerCAmelCase : Any ):
lowerCamelCase__ = []
for audio in batch[data_args.audio_column_name]:
lowerCamelCase__ = random_subsample(
audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(__lowerCAmelCase )
lowerCamelCase__ = feature_extractor(__lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate )
lowerCamelCase__ = {model_input_name: inputs.get(__lowerCAmelCase )}
lowerCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(__lowerCAmelCase : Any ):
lowerCamelCase__ = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
lowerCamelCase__ = feature_extractor(__lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate )
lowerCamelCase__ = {model_input_name: inputs.get(__lowerCAmelCase )}
lowerCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
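
    # Both transforms above return a batch dict keyed by the model's input name
    # (feature_extractor.model_input_names[0], e.g. "input_values" for Wav2Vec2)
    # plus a "labels" list -- the shape the Trainer's default collator expects.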
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowerCamelCase__ = raw_datasets["""train"""].features[data_args.label_column_name].names
lowerCamelCase__ , lowerCamelCase__ = {}, {}
for i, label in enumerate(__lowerCAmelCase ):
lowerCamelCase__ = str(__lowerCAmelCase )
lowerCamelCase__ = label
# Load the accuracy metric from the datasets package
lowerCamelCase__ = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(__lowerCAmelCase : List[Any] ):
lowerCamelCase__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=__lowerCAmelCase , references=eval_pred.label_ids )
lowerCamelCase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowerCAmelCase ) , labelaid=__lowerCAmelCase , idalabel=__lowerCAmelCase , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
lowerCamelCase__ = (
raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(__lowerCAmelCase , output_all_columns=__lowerCAmelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowerCamelCase__ = (
raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(__lowerCAmelCase , output_all_columns=__lowerCAmelCase )
# Initialize our trainer
lowerCamelCase__ = Trainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , )
# Training
if training_args.do_train:
lowerCamelCase__ = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase__ = last_checkpoint
lowerCamelCase__ = trainer.train(resume_from_checkpoint=__lowerCAmelCase )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowerCamelCase__ = trainer.evaluate()
trainer.log_metrics("""eval""" , __lowerCAmelCase )
trainer.save_metrics("""eval""" , __lowerCAmelCase )
# Write model card and (optionally) push to hub
lowerCamelCase__ = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """audio-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""audio-classification"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowerCAmelCase )
else:
trainer.create_model_card(**__lowerCAmelCase )
if __name__ == "__main__":
main()
| 715 |
'''simple docstring'''
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
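

# The update implemented below is plain batch gradient descent on the
# mean-squared-error cost J(theta) = (1/2m) * sum_i (h(x_i) - y_i)^2, whose
# per-parameter derivative is dJ/dtheta_j = (1/m) * sum_i (h(x_i) - y_i) * x_ij
# (with the j = -1 "feature" treated as the constant 1 for the bias term),
# applied simultaneously to all parameters each iteration:
#
#   theta_j := theta_j - LEARNING_RATE * dJ/dtheta_j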
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print('\nTesting gradient descent for a linear hypothesis function.\n')
    test_gradient_descent()
| 9 | 0 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __lowercase (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : Dict = 5
# Realm tok
SCREAMING_SNAKE_CASE_ : Dict = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(self.tmpdirname , 'realm_tokenizer' )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(lowerCAmelCase__ , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ : str = os.path.join(self.tmpdirname , 'realm_block_records' )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer' ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = RealmConfig(num_block_records=self.num_block_records )
return config
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
} )
return dataset
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = np.array(
[
b'This is the first record',
b'This is the second record',
b'This is the third record',
b'This is the fourth record',
b'This is the fifth record',
b'This is a longer longer longer record',
] , dtype=lowerCAmelCase__ , )
return block_records
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.get_config()
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_dummy_retriever()
SCREAMING_SNAKE_CASE_ : List[str] = retriever.tokenizer
SCREAMING_SNAKE_CASE_ : Any = np.array([0, 3] , dtype='long' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer(['Test question'] ).input_ids
SCREAMING_SNAKE_CASE_ : int = tokenizer(
['the fourth'] , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , ).input_ids
SCREAMING_SNAKE_CASE_ : Any = config.reader_seq_len
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = retriever(
lowerCAmelCase__ , lowerCAmelCase__ , answer_ids=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors='np' )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.get_config()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_retriever()
SCREAMING_SNAKE_CASE_ : Tuple = retriever.tokenizer
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array([0, 3, 5] , dtype='long' )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer(['Test question'] ).input_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(
['the fourth', 'longer longer'] , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , ).input_ids
SCREAMING_SNAKE_CASE_ : Union[str, Any] = config.reader_seq_len
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = retriever(
lowerCAmelCase__ , lowerCAmelCase__ , answer_ids=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors='np' )
self.assertEqual([False, True, True] , lowerCAmelCase__ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , lowerCAmelCase__ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
# Test local path
SCREAMING_SNAKE_CASE_ : Optional[Any] = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
self.assertEqual(retriever.block_records[0] , b'This is the first record' )
# Test mocked remote path
with patch('transformers.models.realm.retrieval_realm.hf_hub_download' ) as mock_hf_hub_download:
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join(
os.path.join(self.tmpdirname , 'realm_block_records' ) , _REALM_BLOCK_RECORDS_FILENAME )
SCREAMING_SNAKE_CASE_ : Optional[Any] = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa' )
self.assertEqual(retriever.block_records[0] , b'This is the first record' )
| 101 |
'''simple docstring'''
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
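

# Quick doctest-style examples (illustrative):
#
#   median_of_two_arrays([1, 3], [2])      # -> 2        (odd total length)
#   median_of_two_arrays([1, 2], [3, 4])   # -> 2.5      (even total length)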
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input('''Enter the elements of first array: ''').split()]
    array_2 = [float(x) for x in input('''Enter the elements of second array: ''').split()]
    print(f"""The median of two arrays is: {median_of_two_arrays(array_1, array_2)}""")
| 508 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 239 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=32 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=[10, 20, 30, 40] , lowerCAmelCase__=[2, 2, 3, 2] , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=10 , lowerCAmelCase__=0.02 , lowerCAmelCase__=["stage2", "stage3", "stage4"] , lowerCAmelCase__=[2, 3, 4] , lowerCAmelCase__=None , ):
'''simple docstring'''
_UpperCamelCase : str = parent
_UpperCamelCase : Any = batch_size
_UpperCamelCase : str = image_size
_UpperCamelCase : Any = num_channels
_UpperCamelCase : Union[str, Any] = num_stages
_UpperCamelCase : Any = hidden_sizes
_UpperCamelCase : Optional[Any] = depths
_UpperCamelCase : Union[str, Any] = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : List[Any] = intermediate_size
_UpperCamelCase : Optional[int] = hidden_act
_UpperCamelCase : int = num_labels
_UpperCamelCase : Optional[Any] = initializer_range
_UpperCamelCase : Dict = out_features
_UpperCamelCase : Any = out_indices
_UpperCamelCase : Any = scope
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : Optional[int] = None
if self.use_labels:
_UpperCamelCase : int = ids_tensor([self.batch_size] , self.num_labels )
_UpperCamelCase : str = self.get_config()
return config, pixel_values, labels
def lowercase_ (self ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : str = ConvNextVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCamelCase : Optional[Any] = model(lowerCAmelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : int = ConvNextVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCamelCase : Optional[int] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : Tuple = ConvNextVaBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCamelCase : str = model(lowerCAmelCase__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_UpperCamelCase : int = None
_UpperCamelCase : List[str] = ConvNextVaBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCamelCase : Any = model(lowerCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : List[str] = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[int] = config_and_inputs
_UpperCamelCase : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = config_and_inputs
_UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for ConvNextV2."""
__UpperCAmelCase = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : int = ConvNextVaModelTester(self )
_UpperCamelCase : List[str] = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def lowercase_ (self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ (self ):
'''simple docstring'''
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def lowercase_ (self ):
'''simple docstring'''
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def lowercase_ (self ):
'''simple docstring'''
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def lowercase_ (self ):
'''simple docstring'''
pass
def lowercase_ (self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_UpperCamelCase , _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_with_labels()
_UpperCamelCase : Dict = True
if model_class.__name__ in [
*get_values(lowerCAmelCase__ ),
*get_values(lowerCAmelCase__ ),
]:
continue
_UpperCamelCase : Dict = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
_UpperCamelCase : List[Any] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
_UpperCamelCase : str = model(**lowerCAmelCase__ ).loss
loss.backward()
def lowercase_ (self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_UpperCamelCase , _UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_with_labels()
_UpperCamelCase : Tuple = False
_UpperCamelCase : Union[str, Any] = True
if (
model_class.__name__
in [*get_values(lowerCAmelCase__ ), *get_values(lowerCAmelCase__ )]
or not model_class.supports_gradient_checkpointing
):
continue
_UpperCamelCase : Any = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.gradient_checkpointing_enable()
model.train()
_UpperCamelCase : List[str] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
_UpperCamelCase : Dict = model(**lowerCAmelCase__ ).loss
loss.backward()
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Optional[int] = model_class(lowerCAmelCase__ )
_UpperCamelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : List[Any] = [*signature.parameters.keys()]
_UpperCamelCase : str = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def lowercase_ (self ):
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase : Optional[Any] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
_UpperCamelCase : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCamelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase : Any = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase__ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCamelCase , _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Union[str, Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : Union[str, Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def lowercase_ (self ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase : Union[str, Any] = ConvNextVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 239 | 1 |
'''simple docstring'''
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """Configuration wrapper for an MMBT-style multimodal model."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 44 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build the (old_name, new_name) pairs mapping timm ViT keys to HF ViT keys."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate query/key/value tensors."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
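
# Shape sketch (illustrative numbers, assuming hidden_size=768): the fused timm
# tensor "blocks.{i}.attn.qkv.weight" has shape (2304, 768); the slices above
# take rows [0:768] for query, [768:1536] for key and [1536:2304] for value, so
# concatenating the three chunks along dim 0 reproduces the fused matrix.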
def remove_classification_head_(state_dict):
    """Drop the timm classification head weights from the state dict."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
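
# Hedged illustration (hypothetical two-entry state dict, not a real
# checkpoint): rename_key moves a tensor to its new name in place.
#
#   sd = {"norm.weight": torch.ones(3), "norm.bias": torch.zeros(3)}
#   rename_key(sd, "norm.weight", "vit.layernorm.weight")
#   assert "vit.layernorm.weight" in sd and "norm.weight" not in sd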
def prepare_img():
    # We verify the converted model on an image of two cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm ViT/DeiT checkpoint into the HF ViT structure."""
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path) | 44 | 1 |
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    """Return True for a well-formed dotted-quad IPv4 address (octets 0-255)."""
    octets = [int(i) for i in ip_va_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)
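
# A few sanity checks for the validator above (illustrative, using the
# standard 0-255 octet range):
assert is_ip_va_address_valid("192.168.0.23")
assert not is_ip_va_address_valid("192.168.256.8")  # octet out of range
assert not is_ip_va_address_valid("172.16.0.8.1")  # five octets
assert not is_ip_va_address_valid("a.b.c.d")  # non-numeric octets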
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(F'''{ip} is a {valid_or_invalid} IP v4 address.''')
| 707 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""
FILE_PATH = "file"
@pytest.fixture(scope='session' )
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file
def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 134 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCamelCase :
def __init__( self : str , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : Tuple=4 , _lowerCAmelCase : List[str]=2 , _lowerCAmelCase : List[Any]=7 , _lowerCAmelCase : int=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Tuple=99 , _lowerCAmelCase : Any=36 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Dict=4 , _lowerCAmelCase : List[str]=37 , _lowerCAmelCase : List[Any]="gelu" , _lowerCAmelCase : int=0.1 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : str=512 , _lowerCAmelCase : Any=16 , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : Tuple=0.02 , _lowerCAmelCase : Dict=6 , _lowerCAmelCase : str=6 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Tuple=4 , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : str=1000 , ) -> Any:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = patch_size
__lowercase = text_seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = coordinate_size
__lowercase = shape_size
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
__lowercase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__lowercase = text_seq_length
__lowercase = (image_size // patch_size) ** 2 + 1
__lowercase = self.text_seq_length + self.image_seq_length
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__lowercase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__lowercase = bbox[i, j, 3]
__lowercase = bbox[i, j, 1]
__lowercase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__lowercase = bbox[i, j, 2]
__lowercase = bbox[i, j, 0]
__lowercase = t
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.text_seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__lowercase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self : str , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any ) -> int:
"""simple docstring"""
__lowercase = LayoutLMvaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# text + image
__lowercase = model(UpperCamelCase__ , pixel_values=UpperCamelCase__ )
__lowercase = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
__lowercase = model(UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
__lowercase = model(UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__lowercase = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__lowercase = model(pixel_values=UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _a ( self : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = LayoutLMvaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__lowercase = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str ) -> str:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = LayoutLMvaForTokenClassification(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__lowercase = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _a ( self : Dict , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = LayoutLMvaForQuestionAnswering(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__lowercase = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , pixel_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
(
__lowercase
) = config_and_inputs
__lowercase = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( __snake_case , __snake_case , unittest.TestCase ):
__snake_case :List[str] = False
__snake_case :Union[str, Any] = False
__snake_case :List[str] = False
__snake_case :str = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__snake_case :str = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def _a ( self : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple ) -> Any:
"""simple docstring"""
return True
def _a ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase = LayoutLMvaModelTester(self )
__lowercase = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def _a ( self : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple=False ) -> Optional[Any]:
"""simple docstring"""
__lowercase = copy.deepcopy(UpperCamelCase__ )
if model_class in get_values(UpperCamelCase__ ):
__lowercase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(UpperCamelCase__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCamelCase__ ):
__lowercase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
elif model_class in get_values(UpperCamelCase__ ):
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
elif model_class in [
*get_values(UpperCamelCase__ ),
]:
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
elif model_class in [
*get_values(UpperCamelCase__ ),
]:
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCamelCase__ , )
return inputs_dict
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _a ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _a ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )
def _a ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
def _a ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
@slow
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = LayoutLMvaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def _a ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__ ) if is_vision_available() else None
@slow
def _a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__lowercase = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(UpperCamelCase__ )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).pixel_values.to(UpperCamelCase__ )
__lowercase = torch.tensor([[1, 2]] )
__lowercase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
__lowercase = model(
input_ids=input_ids.to(UpperCamelCase__ ) , bbox=bbox.to(UpperCamelCase__ ) , pixel_values=pixel_values.to(UpperCamelCase__ ) , )
# verify the logits
__lowercase = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase__ )
__lowercase = torch.tensor(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
| 80 |
"""simple docstring"""
def add(first: int, second: int) -> int:
    """
    Add two non-negative integers using only bitwise operations.

    >>> add(3, 5)
    8
    >>> add(13, 5)
    18
    """
    while second != 0:
        # XOR sums the bits without carries; AND << 1 computes the carries.
        carry = first & second
        first ^= second
        second = carry << 1
    return first
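
# Worked trace (illustrative): add(5, 3)
#   carry = 5 & 3 = 1, first = 5 ^ 3 = 6, second = 1 << 1 = 2
#   carry = 6 & 2 = 2, first = 6 ^ 2 = 4, second = 2 << 1 = 4
#   carry = 4 & 4 = 4, first = 4 ^ 4 = 0, second = 4 << 1 = 8
#   carry = 0 & 8 = 0, first = 0 ^ 8 = 8, second = 0  -> returns 8
assert add(5, 3) == 8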
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = int(input("Enter the first number: ").strip())
__A = int(input("Enter the second number: ").strip())
print(F'''{add(first, second) = }''')
| 346 | 0 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control(self):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_a + [102]
| 232 |
def is_subset_sum(arr: list[int], required_sum: int) -> bool:
    """
    Bottom-up DP: subset[i][j] is True when some subset of the first i
    elements of arr sums to j.

    >>> is_subset_sum([3, 34, 4, 12, 5, 2], 9)
    True
    >>> is_subset_sum([3, 34, 4, 12, 5, 2], 30)
    False
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            else:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
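
# Illustrative DP walk-through (toy numbers, not from the source): for
# arr = [3, 4] and required_sum = 7 the table fills row by row as
#   subset[0] = [T, F, F, F, F, F, F, F]   (empty set: only sum 0)
#   subset[1] = [T, F, F, T, F, F, F, F]   (can also make 3)
#   subset[2] = [T, F, F, T, T, F, F, T]   (adds 4 and 3 + 4 = 7)
# so is_subset_sum([3, 4], 7) is True.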
if __name__ == "__main__":
import doctest
doctest.testmod()
| 232 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """Undirected weighted graph stored as an edge -> weight mapping."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Grow a minimum spanning tree outward from the smallest vertex."""
        subgraph: Graph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
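
# Minimal illustrative check (hypothetical 3-vertex triangle, not the Project
# Euler input): Prim's keeps the two cheapest edges and drops the heaviest.
#   g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
#   mst = g.prims_algorithm()
#   sum(mst.edges.values()) == 3   # edges (0, 1) and (1, 2) survive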
def solution(filename: str = "p107_network.txt") -> int:
    """Return the maximum saving made by replacing the network with its MST."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    with open(network_file) as f:
        data = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]
    for edge_a in range(1, len(adjacency_matrix)):
        for edge_b in range(edge_a):
            if adjacency_matrix[edge_a][edge_b] != "-":
                edges[(edge_b, edge_a)] = int(adjacency_matrix[edge_a][edge_b])
    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()
    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 88 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def bamb(x) -> int:
    # bytes -> whole mebibytes, e.g. bamb(3 * 2**20) == 3
    return int(x / 2**20)
class TorchTracemalloc:
    """Context manager that records CUDA memory deltas around a block."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 548 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
__SCREAMING_SNAKE_CASE ="\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=8 ):
lowercase_ : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase_ : Any = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
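# Readable sketch of the helper above (the name `downscale_height_and_width`
# follows the un-obfuscated diffusers source): it rounds the requested size up
# to the next multiple of scale_factor**2 and returns the matching
# latent-space height/width.
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1  # round up instead of truncating
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor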
def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict=5_12 , __SCREAMING_SNAKE_CASE : Optional[Any]=5_12 ):
lowercase_ : Optional[Any] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
lowercase_ : int = np.array(pil_image.convert('RGB' ) )
lowercase_ : Dict = arr.astype(np.floataa ) / 127.5 - 1
lowercase_ : str = np.transpose(__SCREAMING_SNAKE_CASE , [2, 0, 1] )
lowercase_ : List[Any] = torch.from_numpy(__SCREAMING_SNAKE_CASE ).unsqueeze(0 )
return image
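# Readable sketch of the image-preparation helper above (name per the
# un-obfuscated diffusers source): bicubic resize, rescale pixel values from
# [0, 255] to [-1, 1], and reorder to a (1, C, H, W) float tensor.
def prepare_image(pil_image: Image.Image, w: int = 512, h: int = 512) -> torch.Tensor:
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1  # [0, 255] -> [-1, 1]
    arr = np.transpose(arr, [2, 0, 1])  # HWC -> CHW
    return torch.from_numpy(arr).unsqueeze(0)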
class UpperCamelCase ( lowercase_ ):
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(
unet=__UpperCamelCase ,scheduler=__UpperCamelCase ,movq=__UpperCamelCase ,)
lowercase_ : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = min(int(num_inference_steps * strength ) ,__UpperCamelCase )
lowercase_ : Any = max(num_inference_steps - init_timestep ,0 )
lowercase_ : str = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
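    # Note on the method above: for img2img, `strength` in [0, 1] selects how
    # much of the noise schedule is actually run. With 100 inference steps and
    # strength=0.2, only the final 20 timesteps are kept, so the init image is
    # lightly noised and its overall structure survives denoising.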
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ) -> Optional[Any]:
'''simple docstring'''
if not isinstance(__UpperCamelCase ,(torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__UpperCamelCase )}''' )
lowercase_ : Optional[int] = image.to(device=__UpperCamelCase ,dtype=__UpperCamelCase )
lowercase_ : Dict = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : Optional[Any] = image
else:
if isinstance(__UpperCamelCase ,__UpperCamelCase ) and len(__UpperCamelCase ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(__UpperCamelCase )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : Dict = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__UpperCamelCase )
]
lowercase_ : Union[str, Any] = torch.cat(__UpperCamelCase ,dim=0 )
else:
lowercase_ : Optional[Any] = self.movq.encode(__UpperCamelCase ).latent_dist.sample(__UpperCamelCase )
lowercase_ : List[str] = self.movq.config.scaling_factor * init_latents
lowercase_ : Any = torch.cat([init_latents] ,dim=0 )
lowercase_ : List[Any] = init_latents.shape
lowercase_ : Any = randn_tensor(__UpperCamelCase ,generator=__UpperCamelCase ,device=__UpperCamelCase ,dtype=__UpperCamelCase )
# get latents
lowercase_ : Tuple = self.scheduler.add_noise(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
lowercase_ : int = init_latents
return latents
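    # Note on the method above: the init image is encoded to movq latents,
    # scaled by the VQ model's scaling factor, tiled to the effective batch
    # size, and noised to the starting timestep via scheduler.add_noise --
    # this noising of an existing image is what makes the pipeline img2img.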
def _UpperCAmelCase ( self ,__UpperCamelCase=0 ) -> List[str]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowercase_ : Optional[Any] = torch.device(f'''cuda:{gpu_id}''' )
lowercase_ : Union[str, Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase=0 ) -> int:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('>=' ,'0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowercase_ : List[str] = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('cpu' ,silence_dtype_warnings=__UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : int = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ : Tuple = cpu_offload_with_hook(__UpperCamelCase ,__UpperCamelCase ,prev_module_hook=__UpperCamelCase )
# We'll offload the last model manually.
lowercase_ : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
if not hasattr(self.unet ,'_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__UpperCamelCase ,'_hf_hook' )
and hasattr(module._hf_hook ,'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__UpperCamelCase )
def __call__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = 512 ,__UpperCamelCase = 512 ,__UpperCamelCase = 100 ,__UpperCamelCase = 4.0 ,__UpperCamelCase = 0.3 ,__UpperCamelCase = 1 ,__UpperCamelCase = None ,__UpperCamelCase = "pil" ,__UpperCamelCase = True ,) -> Any:
'''simple docstring'''
lowercase_ : Optional[int] = self._execution_device
lowercase_ : int = guidance_scale > 1.0
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : Union[str, Any] = torch.cat(__UpperCamelCase ,dim=0 )
lowercase_ : Union[str, Any] = image_embeds.shape[0]
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : Tuple = torch.cat(__UpperCamelCase ,dim=0 )
if do_classifier_free_guidance:
lowercase_ : Tuple = image_embeds.repeat_interleave(__UpperCamelCase ,dim=0 )
lowercase_ : List[str] = negative_image_embeds.repeat_interleave(__UpperCamelCase ,dim=0 )
lowercase_ : int = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=__UpperCamelCase )
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : Union[str, Any] = [image]
        if not all(isinstance(i ,(PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                f'''Input is in incorrect format: {[type(i ) for i in image]}. Currently, we only support PIL images and PyTorch tensors''' )
        lowercase_ : Optional[int] = torch.cat([prepare_image(i ,__UpperCamelCase ,__UpperCamelCase ) for i in image] ,dim=0 )
lowercase_ : List[str] = image.to(dtype=image_embeds.dtype ,device=__UpperCamelCase )
lowercase_ : Optional[Any] = self.movq.encode(__UpperCamelCase )['latents']
lowercase_ : List[Any] = latents.repeat_interleave(__UpperCamelCase ,dim=0 )
self.scheduler.set_timesteps(__UpperCamelCase ,device=__UpperCamelCase )
lowercase_ : Any = self.get_timesteps(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
lowercase_ : int = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ : Any = downscale_height_and_width(__UpperCamelCase ,__UpperCamelCase ,self.movq_scale_factor )
lowercase_ : str = self.prepare_latents(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,image_embeds.dtype ,__UpperCamelCase ,__UpperCamelCase )
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : Tuple = {'image_embeds': image_embeds}
lowercase_ : Dict = self.unet(
sample=__UpperCamelCase ,timestep=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,added_cond_kwargs=__UpperCamelCase ,return_dict=__UpperCamelCase ,)[0]
if do_classifier_free_guidance:
lowercase_ : List[Any] = noise_pred.split(latents.shape[1] ,dim=1 )
lowercase_ : int = noise_pred.chunk(2 )
lowercase_ : Tuple = variance_pred.chunk(2 )
lowercase_ : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Union[str, Any] = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ : Any = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Union[str, Any] = self.scheduler.step(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,generator=__UpperCamelCase ,)[0]
# post-processing
lowercase_ : Any = self.movq.decode(__UpperCamelCase ,force_not_quantize=__UpperCamelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}''' )
if output_type in ["np", "pil"]:
lowercase_ : Union[str, Any] = image * 0.5 + 0.5
lowercase_ : int = image.clamp(0 ,1 )
lowercase_ : Dict = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
lowercase_ : List[Any] = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase )
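# The guidance arithmetic used inside the denoising loop above, in isolation:
# the UNet runs on a doubled batch (unconditional + conditioned) and the two
# predictions are blended. A minimal sketch, with illustrative names:
def apply_cfg(noise_pred_uncond, noise_pred_text, guidance_scale: float):
    # guidance_scale == 1.0 reduces to the conditioned prediction alone;
    # larger values push the sample further toward the conditioning.
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)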
| 700 | """simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
__SCREAMING_SNAKE_CASE ={
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
__SCREAMING_SNAKE_CASE =logging.WARNING
def lowercase__( ):
lowercase_ : List[Any] = os.getenv('DATASETS_VERBOSITY' , __SCREAMING_SNAKE_CASE )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
F'''has to be one of: { ", ".join(log_levels.keys() ) }''' )
return _default_log_level
def lowercase__( ):
return __name__.split('.' )[0]
def lowercase__( ):
return logging.getLogger(_get_library_name() )
def lowercase__( ):
# Apply our default configuration to the library root logger.
lowercase_ : Optional[int] = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def lowercase__( ):
lowercase_ : int = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[str] = None ):
if name is None:
lowercase_ : Union[str, Any] = _get_library_name()
return logging.getLogger(__SCREAMING_SNAKE_CASE )
def lowercase__( ):
return _get_library_root_logger().getEffectiveLevel()
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
_get_library_root_logger().setLevel(__SCREAMING_SNAKE_CASE )
def lowercase__( ):
return set_verbosity(__SCREAMING_SNAKE_CASE )
def lowercase__( ):
return set_verbosity(__SCREAMING_SNAKE_CASE )
def lowercase__( ):
return set_verbosity(__SCREAMING_SNAKE_CASE )
def lowercase__( ):
return set_verbosity(__SCREAMING_SNAKE_CASE )
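# The four identical-looking wrappers above are, in the readable datasets
# source, fixed-level setters (set_verbosity_debug/info/warning/error); the
# obfuscation collapsed their arguments into one placeholder. A self-contained
# sketch of the intent (the `_sketch` names are illustrative):
def set_verbosity_sketch(level: int) -> None:
    logging.getLogger(__name__.split('.')[0]).setLevel(level)


def set_verbosity_info_sketch() -> None:
    set_verbosity_sketch(INFO)


def set_verbosity_warning_sketch() -> None:
    set_verbosity_sketch(WARNING)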
def lowercase__( ):
lowercase_ : Tuple = False
def lowercase__( ):
lowercase_ : Any = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class UpperCamelCase :
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> int: # pylint: disable=unused-argument
'''simple docstring'''
lowercase_ : Optional[Any] = args[0] if args else None
def __iter__( self ) -> Union[str, Any]:
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
def empty_fn(*__UpperCamelCase ,**__UpperCamelCase ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ) -> Optional[int]:
'''simple docstring'''
return self
def __exit__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
return
__SCREAMING_SNAKE_CASE =True
class UpperCamelCase :
def __call__( self ,*__UpperCamelCase ,__UpperCamelCase=False ,**__UpperCamelCase ) -> Tuple:
'''simple docstring'''
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*__UpperCamelCase ,**__UpperCamelCase )
else:
return EmptyTqdm(*__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> str:
'''simple docstring'''
lowercase_ : Tuple = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__SCREAMING_SNAKE_CASE =_tqdm_cls()
def lowercase__( ):
global _tqdm_active
return bool(_tqdm_active )
def lowercase__( ):
global _tqdm_active
lowercase_ : Union[str, Any] = True
def lowercase__( ):
global _tqdm_active
lowercase_ : Optional[int] = False
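# Readable sketch of the tqdm shim above: a factory that yields a real tqdm
# bar when bars are active and a do-nothing stand-in otherwise, so callers can
# always iterate and call bar methods unconditionally. Names are illustrative.
class _NoOpBar:
    def __init__(self, iterable=None, *args, **kwargs):
        self._iterable = iterable

    def __iter__(self):
        return iter(self._iterable)

    def __getattr__(self, name):
        return lambda *args, **kwargs: None  # every method becomes a no-op


def make_bar(iterable, active: bool):
    return tqdm_lib.tqdm(iterable) if active else _NoOpBar(iterable)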
| 477 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class _snake_case ( _snake_case ):
SCREAMING_SNAKE_CASE__ = 'Wav2Vec2FeatureExtractor'
SCREAMING_SNAKE_CASE__ = 'AutoTokenizer'
def __init__( self , _lowerCamelCase , _lowerCamelCase ):
super().__init__(_lowerCamelCase , _lowerCamelCase )
a :Any = self.feature_extractor
a :int = False
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , _lowerCamelCase , **_lowerCamelCase ):
try:
return super().from_pretrained(_lowerCamelCase , **_lowerCamelCase )
except OSError:
warnings.warn(
F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
''' include a `tokenizer_class` attribute is deprecated and will be '''
'''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
''' attribute to either your `config.json` or `tokenizer_config.json` '''
'''file to suppress this warning: ''' , _lowerCamelCase , )
a :Tuple = WavaVecaFeatureExtractor.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
a :List[str] = WavaVecaCTCTokenizer.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
return cls(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
def __call__( self , *_lowerCamelCase , **_lowerCamelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowerCamelCase , **_lowerCamelCase )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
a :Optional[Any] = kwargs.pop('''raw_speech''' )
else:
a :Optional[int] = kwargs.pop('''audio''' , _lowerCamelCase )
a :int = kwargs.pop('''sampling_rate''' , _lowerCamelCase )
a :Dict = kwargs.pop('''text''' , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
a :Optional[int] = args[0]
a :int = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
a :Union[str, Any] = self.feature_extractor(_lowerCamelCase , *_lowerCamelCase , sampling_rate=_lowerCamelCase , **_lowerCamelCase )
if text is not None:
a :List[Any] = self.tokenizer(_lowerCamelCase , **_lowerCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
a :Tuple = encodings['''input_ids''']
return inputs
def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*_lowerCamelCase , **_lowerCamelCase )
a :List[str] = kwargs.pop('''input_features''' , _lowerCamelCase )
a :List[Any] = kwargs.pop('''labels''' , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
a :List[Any] = args[0]
a :int = args[1:]
if input_features is not None:
a :Optional[Any] = self.feature_extractor.pad(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
if labels is not None:
a :List[Any] = self.tokenizer.pad(_lowerCamelCase , **_lowerCamelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
a :str = labels['''input_ids''']
return input_features
def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ):
return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , *_lowerCamelCase , **_lowerCamelCase ):
return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase )
@contextmanager
def SCREAMING_SNAKE_CASE__ ( self ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call).''' )
a :Tuple = True
a :int = self.tokenizer
yield
a :Optional[Any] = self.feature_extractor
a :Tuple = False
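# Usage sketch (the class above is Wav2Vec2Processor in the readable
# transformers source; the model id below is illustrative):
#
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   batch = processor(audio=waveform, sampling_rate=16_000,
#                     text="HELLO WORLD", return_tensors="pt")
#   # batch["input_values"] feeds the model; batch["labels"] holds token ids,
#   # merged by the `__call__` above when both audio and text are given.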
| 445 |
import datasets
snake_case : int = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
snake_case : Tuple = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
snake_case : Union[str, Any] = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def __lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] ):
"""simple docstring"""
return (preds == labels).mean()
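# Note: the accuracy helper above relies on elementwise `==` plus `.mean()`,
# so it expects NumPy arrays; the metric below declares format="numpy" for
# exactly that reason. With plain Python lists, `preds == labels` would
# evaluate to a single bool and `.mean()` would fail.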
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def SCREAMING_SNAKE_CASE__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ):
return {"accuracy": simple_accuracy(_lowerCamelCase , _lowerCamelCase )}
| 445 | 1 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __magic_name__ :
'''simple docstring'''
def __init__( self: int , _lowerCamelCase: List[str] , _lowerCamelCase: List[Any]=3 , _lowerCamelCase: Union[str, Any]=32 , _lowerCamelCase: List[Any]=3 , _lowerCamelCase: Tuple=10 , _lowerCamelCase: Optional[Any]=[10, 20, 30, 40] , _lowerCamelCase: List[str]=[1, 1, 2, 1] , _lowerCamelCase: str=True , _lowerCamelCase: List[Any]=True , _lowerCamelCase: str="relu" , _lowerCamelCase: Optional[Any]=3 , _lowerCamelCase: Union[str, Any]=None , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = embeddings_size
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = len(_lowerCamelCase )
def _A ( self: Optional[int] ):
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, labels
def _A ( self: Tuple ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _A ( self: Tuple , _lowerCamelCase: List[str] , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: int ):
SCREAMING_SNAKE_CASE_ = RegNetModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _A ( self: Union[str, Any] , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Optional[Any] ):
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = RegNetForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A ( self: Dict ):
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : List[str] = (
{"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Optional[int] = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
SCREAMING_SNAKE_CASE__ : Tuple = False
def _A ( self: Any ):
SCREAMING_SNAKE_CASE_ = RegNetModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def _A ( self: List[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _A ( self: List[Any] ):
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def _A ( self: Union[str, Any] ):
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def _A ( self: Optional[Any] ):
pass
def _A ( self: List[str] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _A ( self: Tuple ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _A ( self: Dict ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(config=_lowerCamelCase )
for name, module in model.named_modules():
if isinstance(_lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def _A ( self: Dict ):
def check_hidden_states_output(_lowerCamelCase: str , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Dict ):
SCREAMING_SNAKE_CASE_ = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE_ = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
SCREAMING_SNAKE_CASE_ = layer_type
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _A ( self: List[Any] ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def _A ( self: List[str] ):
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = RegNetModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def a ():
SCREAMING_SNAKE_CASE_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
'''simple docstring'''
@cached_property
def _A ( self: str ):
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _A ( self: Optional[Any] ):
SCREAMING_SNAKE_CASE_ = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.41_80, -1.50_51, -3.48_36] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) )
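# Note on the integration check above: the three reference logits are compared
# with torch.allclose at atol=1e-4, loose enough to absorb minor numerical
# drift across hardware while still catching regressions in ported weights.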
| 705 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE ="""%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
__SCREAMING_SNAKE_CASE =f"""https://www.google.com/search?q={query}&num=100"""
__SCREAMING_SNAKE_CASE =requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
try:
__SCREAMING_SNAKE_CASE =(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
__SCREAMING_SNAKE_CASE =parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
webbrowser.open(link)
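# Note: this scraper depends on Google's current result markup (the "yuRUbf"
# and "kCrYT" class names); when that markup changes, both lookups raise
# AttributeError and the script fails. The except branch only covers the
# second, lightweight layout, not arbitrary changes.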
| 89 | 0 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase ( a__ : str , a__ : List[str] ) -> Union[str, Any]:
assert isinstance(a__ , a__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase ( a__ : int , a__ : List[Any] , a__ : int ) -> List[Any]:
_UpperCamelCase = tmp_path / '''cache'''
_UpperCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCamelCase = JsonDatasetReader(a__ , cache_dir=a__ , keep_in_memory=a__ ).read()
_check_json_dataset(a__ , a__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase ( a__ : Optional[int] , a__ : int , a__ : List[Any] ) -> Optional[Any]:
_UpperCamelCase = tmp_path / '''cache'''
_UpperCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_UpperCamelCase = features.copy() if features else default_expected_features
_UpperCamelCase = (
Features({feature: Value(a__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCamelCase = JsonDatasetReader(a__ , features=a__ , cache_dir=a__ ).read()
_check_json_dataset(a__ , a__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowercase ( a__ : Dict , a__ : Any , a__ : Any ) -> Union[str, Any]:
_UpperCamelCase = tmp_path / '''cache'''
_UpperCamelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
_UpperCamelCase = features.copy() if features else default_expected_features
_UpperCamelCase = (
Features({feature: Value(a__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCamelCase = JsonDatasetReader(a__ , features=a__ , cache_dir=a__ ).read()
assert isinstance(a__ , a__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowercase ( a__ : Any , a__ : str ) -> List[str]:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
_UpperCamelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
_UpperCamelCase = features.copy()
_UpperCamelCase = (
Features({feature: Value(a__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCamelCase = tmp_path / '''cache'''
_UpperCamelCase = JsonDatasetReader(a__ , features=a__ , cache_dir=a__ ).read()
assert isinstance(a__ , a__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase ( a__ : Tuple , a__ : int , a__ : List[str] ) -> int:
_UpperCamelCase = tmp_path / '''cache'''
_UpperCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_UpperCamelCase = JsonDatasetReader(a__ , cache_dir=a__ , split=a__ ).read()
_check_json_dataset(a__ , a__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase ( a__ : Optional[Any] , a__ : List[str] , a__ : int ) -> List[Any]:
if issubclass(a__ , a__ ):
_UpperCamelCase = jsonl_path
elif issubclass(a__ , a__ ):
_UpperCamelCase = [jsonl_path]
_UpperCamelCase = tmp_path / '''cache'''
_UpperCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_UpperCamelCase = JsonDatasetReader(a__ , cache_dir=a__ ).read()
_check_json_dataset(a__ , a__ )
def lowercase ( a__ : Dict , a__ : Dict , a__ : str=("train",) ) -> Dict:
assert isinstance(a__ , a__ )
for split in splits:
_UpperCamelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase ( a__ : Optional[int] , a__ : Any , a__ : Any ) -> Dict:
_UpperCamelCase = tmp_path / '''cache'''
_UpperCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCamelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=a__ , keep_in_memory=a__ ).read()
_check_json_datasetdict(a__ , a__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase ( a__ : Optional[int] , a__ : Optional[int] , a__ : Dict ) -> Union[str, Any]:
_UpperCamelCase = tmp_path / '''cache'''
_UpperCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_UpperCamelCase = features.copy() if features else default_expected_features
_UpperCamelCase = (
Features({feature: Value(a__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCamelCase = JsonDatasetReader({'''train''': jsonl_path} , features=a__ , cache_dir=a__ ).read()
_check_json_datasetdict(a__ , a__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase ( a__ : Union[str, Any] , a__ : Any , a__ : Dict ) -> int:
if split:
_UpperCamelCase = {split: jsonl_path}
else:
_UpperCamelCase = '''train'''
_UpperCamelCase = {'''train''': jsonl_path, '''test''': jsonl_path}
_UpperCamelCase = tmp_path / '''cache'''
_UpperCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
_UpperCamelCase = JsonDatasetReader(a__ , cache_dir=a__ ).read()
_check_json_datasetdict(a__ , a__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowercase ( a__ : int ) -> List[Any]:
return json.load(a__ )
def lowercase ( a__ : Optional[int] ) -> Optional[int]:
return [json.loads(a__ ) for line in buffer]
class UpperCAmelCase_ :
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def _UpperCamelCase ( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , lines=__UpperCamelCase ).write()
buffer.seek(0 )
_UpperCamelCase = load_json_function(__UpperCamelCase )
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert isinstance(exported_content[0] , __UpperCamelCase )
assert len(__UpperCamelCase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def _UpperCamelCase ( self : Any , __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : int ) -> List[str]:
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , lines=__UpperCamelCase , orient=__UpperCamelCase ).write()
buffer.seek(0 )
_UpperCamelCase = load_json(__UpperCamelCase )
assert isinstance(__UpperCamelCase , __UpperCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__UpperCamelCase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__UpperCamelCase ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def _UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] ) -> Optional[int]:
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , lines=__UpperCamelCase , num_proc=2 ).write()
buffer.seek(0 )
_UpperCamelCase = load_json_function(__UpperCamelCase )
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert isinstance(exported_content[0] , __UpperCamelCase )
assert len(__UpperCamelCase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def _UpperCamelCase ( self : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] ) -> List[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , lines=__UpperCamelCase , orient=__UpperCamelCase , num_proc=2 ).write()
buffer.seek(0 )
_UpperCamelCase = load_json(__UpperCamelCase )
assert isinstance(__UpperCamelCase , __UpperCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__UpperCamelCase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__UpperCamelCase ) == 10
def _UpperCamelCase ( self : Any , __UpperCamelCase : Optional[int] ) -> Optional[int]:
with pytest.raises(__UpperCamelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def _UpperCamelCase ( self : List[Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : int , __UpperCamelCase : Any ) -> Dict:
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
_UpperCamelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , compression=__UpperCamelCase ).write()
with fsspec.open(__UpperCamelCase , '''rb''' , compression='''infer''' ) as f:
_UpperCamelCase = f.read()
with fsspec.open(__UpperCamelCase , '''rb''' , compression='''infer''' ) as f:
_UpperCamelCase = f.read()
assert exported_content == original_content
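# Round-trip sketch of the classes under test above (helper and file names are
# illustrative; `tmp_path` stands in for pytest's fixture):
def _roundtrip_demo(tmp_path):
    ds = Dataset.from_dict({'col_1': ['a', 'b'], 'col_2': [1, 2], 'col_3': [1.0, 2.0]})
    out = str(tmp_path / 'demo.jsonl')
    JsonDatasetWriter(ds, out, lines=True).write()  # one JSON object per line
    return JsonDatasetReader(out).read()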
| 420 | """simple docstring"""
from queue import PriorityQueue
from typing import Any
import numpy as np
def lowercase ( a__ : dict , a__ : str , a__ : set , a__ : set , a__ : dict , a__ : dict , a__ : PriorityQueue , a__ : dict , a__ : float | int , ) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
_UpperCamelCase = cst_fwd.get(a__ , np.inf )
_UpperCamelCase = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
_UpperCamelCase = new_cost_f
_UpperCamelCase = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
_UpperCamelCase = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def lowercase ( a__ : str , a__ : str , a__ : dict , a__ : dict ) -> int:
_UpperCamelCase = -1
_UpperCamelCase = set()
_UpperCamelCase = set()
_UpperCamelCase = {source: 0}
_UpperCamelCase = {destination: 0}
_UpperCamelCase = {source: None}
_UpperCamelCase = {destination: None}
_UpperCamelCase = PriorityQueue()
_UpperCamelCase = PriorityQueue()
_UpperCamelCase = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
_UpperCamelCase , _UpperCamelCase = queue_forward.get()
visited_forward.add(a__ )
_UpperCamelCase , _UpperCamelCase = queue_backward.get()
visited_backward.add(a__ )
_UpperCamelCase = pass_and_relaxation(
a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , )
_UpperCamelCase = pass_and_relaxation(
a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
_UpperCamelCase = shortest_distance
return shortest_path_distance
UpperCAmelCase = {
"""B""": [["""C""", 1]],
"""C""": [["""D""", 1]],
"""D""": [["""F""", 1]],
"""E""": [["""B""", 1], ["""G""", 2]],
"""F""": [],
"""G""": [["""F""", 1]],
}
UpperCAmelCase = {
"""B""": [["""E""", 1]],
"""C""": [["""B""", 1]],
"""D""": [["""C""", 1]],
"""F""": [["""D""", 1], ["""G""", 1]],
"""E""": [[None, np.inf]],
"""G""": [["""E""", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
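# Usage sketch: in the readable source the two adjacency maps above are the
# forward and backward graphs and the entry point is bidirectional_dij (names
# assumed; the obfuscation renamed both dicts to the same identifier). The
# shortest E -> F path is E -> G -> F with total weight 3:
#
#   >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
#   3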
| 420 | 1 |
from manim import *
class lowerCamelCase ( A_ ):
def UpperCAmelCase(self : Tuple ) -> Any:
snake_case = Rectangle(height=0.5 , width=0.5 )
snake_case = Rectangle(height=0.25 , width=0.25 )
snake_case = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case = [mem.copy() for i in range(6 )]
snake_case = [mem.copy() for i in range(6 )]
snake_case = VGroup(*_A ).arrange(_A , buff=0 )
snake_case = VGroup(*_A ).arrange(_A , buff=0 )
snake_case = VGroup(_A , _A ).arrange(_A , buff=0 )
snake_case = Text("CPU" , font_size=2_4 )
snake_case = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_A )
snake_case = [mem.copy() for i in range(4 )]
snake_case = VGroup(*_A ).arrange(_A , buff=0 )
snake_case = Text("GPU" , font_size=2_4 )
snake_case = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A )
gpu.move_to([-1, -1, 0] )
self.add(_A )
snake_case = [mem.copy() for i in range(6 )]
snake_case = VGroup(*_A ).arrange(_A , buff=0 )
snake_case = Text("Model" , font_size=2_4 )
snake_case = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A )
model.move_to([3, -1.0, 0] )
self.add(_A )
snake_case = []
snake_case = []
snake_case = []
for i, rect in enumerate(_A ):
rect.set_stroke(_A )
snake_case = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_A , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_A )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_A , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_A , buff=0.0 )
self.add(_A )
model_cpu_arr.append(_A )
self.add(*_A , *_A , *_A )
snake_case = [mem.copy() for i in range(6 )]
snake_case = VGroup(*_A ).arrange(_A , buff=0 )
snake_case = Text("Loaded Checkpoint" , font_size=2_4 )
snake_case = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A )
checkpoint.move_to([3, 0.5, 0] )
self.add(_A )
snake_case = []
snake_case = []
for i, rect in enumerate(_A ):
snake_case = fill.copy().set_fill(_A , opacity=0.7 )
target.move_to(_A )
ckpt_arr.append(_A )
snake_case = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_A )
self.add(*_A , *_A )
snake_case = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case = MarkupText(
            f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_A , _A )
snake_case = MarkupText(
            f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=1_8 , )
blue_text.next_to(_A , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_A )
snake_case = MarkupText(
f'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
snake_case = [meta_mem.copy() for i in range(6 )]
snake_case = [meta_mem.copy() for i in range(6 )]
snake_case = VGroup(*_A ).arrange(_A , buff=0 )
snake_case = VGroup(*_A ).arrange(_A , buff=0 )
snake_case = VGroup(_A , _A ).arrange(_A , buff=0 )
snake_case = Text("Disk" , font_size=2_4 )
snake_case = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_A , run_time=3 ) , Write(_A , run_time=1 ) , Create(_A , run_time=1 ) )
snake_case = []
for i, rect in enumerate(_A ):
snake_case = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_A , run_time=1.5 ) )
self.play(*_A )
self.play(FadeOut(_A ) )
snake_case = MarkupText(f'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(_A , run_time=3 ) )
self.play(
FadeOut(_A , _A , *_A , *_A ) , )
self.wait()
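# Rendering note: a Scene subclass like the one above is rendered from the
# command line, e.g. `manim -pql <file>.py <SceneName>` (-p previews the
# result, -ql renders at low quality for fast iteration). File and scene
# names here are placeholders.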
| 294 |
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
_A = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class lowerCamelCase :
def __init__(self : Any , _A : int = 1_4 ) -> None:
if group not in primes:
raise ValueError("Unsupported Group" )
snake_case = primes[group]["prime"]
snake_case = primes[group]["generator"]
snake_case = int(hexlify(urandom(3_2 ) ) , base=1_6 )
def UpperCAmelCase(self : Any ) -> str:
return hex(self.__private_key )[2:]
def UpperCAmelCase(self : Tuple ) -> str:
snake_case = pow(self.generator , self.__private_key , self.prime )
return hex(_A )[2:]
def UpperCAmelCase(self : Optional[int] , _A : int ) -> bool:
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(_A , (self.prime - 1) // 2 , self.prime ) == 1
)
def UpperCAmelCase(self : List[Any] , _A : str ) -> str:
snake_case = int(_A , base=1_6 )
if not self.is_valid_public_key(_A ):
raise ValueError("Invalid public key" )
snake_case = pow(_A , self.__private_key , self.prime )
return shaaaa(str(_A ).encode() ).hexdigest()
@staticmethod
def UpperCAmelCase(_A : int , _A : int ) -> bool:
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(_A , (prime - 1) // 2 , _A ) == 1
)
@staticmethod
def UpperCAmelCase(_A : str , _A : str , _A : int = 1_4 ) -> str:
snake_case = int(_A , base=1_6 )
snake_case = int(_A , base=1_6 )
snake_case = primes[group]["prime"]
if not DiffieHellman.is_valid_public_key_static(_A , _A ):
raise ValueError("Invalid public key" )
snake_case = pow(_A , _A , _A )
return shaaaa(str(_A ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
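# Usage sketch: the class above is DiffieHellman in the readable source (the
# body itself references `DiffieHellman.is_valid_public_key_static`). Method
# names below follow that source and are assumptions relative to this dump.
# Two parties derive the same shared secret:
#
#   alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
#   secret_a = alice.generate_shared_key(bob.generate_public_key())
#   secret_b = bob.generate_shared_key(alice.generate_public_key())
#   assert secret_a == secret_b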
| 294 | 1 |
"""
Project Euler Problem 551: https://projecteuler.net/problem=551

Sum of digits sequence: a(0) = 1 and, for n >= 1, a(n) is the sum of a(n - 1)
and the digit sum of a(n - 1). Find a(10**15).
"""
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    # the term is stored as little-endian digits; split it as b * 10^k + c
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
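# Illustrative addition (not in the original solution): a direct O(n) reference
# implementation of the same "add the digit sum" step. For small n it should
# agree with solution(n), given the indexing convention used above.
def brute_force_term(n: int) -> int:
    term = 1
    for _ in range(n - 1):
        term += sum(int(digit) for digit in str(term))
    return term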
if __name__ == "__main__":
print(f"""{solution() = }""")
"""Numerical solution of an ODE initial-value problem with the classical
fourth-order Runge-Kutta (RK4) method."""
import numpy as np


def runge_kutta(f, y0, x0, h, x_end):
    """
    Calculate the numeric solution at each step to the ODE f(x, y) using RK4.

    Arguments:
    f -- the ODE right-hand side as a function of x and y
    y0 -- the initial value for y
    x0 -- the initial value for x
    h -- the step size
    x_end -- the end value for x
    """
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y
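# Illustrative addition (not in the original module): integrating y' = y from
# x = 0 with y(0) = 1. The endpoint should closely approximate e**3 for this
# step size, since RK4's global error is O(h**4).
def demo_runge_kutta() -> float:
    y = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.025, 3.0)
    return float(abs(y[-1] - np.exp(3.0)))  # expected to be very small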
if __name__ == "__main__":
import doctest
doctest.testmod()
'''REALM model configuration.'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    r"""Configuration for REALM models (embedder, scorer, encoder, reader, and open-QA)."""

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, retriever_proj_size=128,
        num_hidden_layers=12, num_attention_heads=12, num_candidates=8,
        intermediate_size=3072, hidden_act="gelu_new",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12,
        span_hidden_size=256, max_span_width=10, reader_layer_norm_eps=1e-3,
        reader_beam_size=5, reader_seq_len=320,
        num_block_records=13353718, searcher_beam_size=5000,
        pad_token_id=1, bos_token_id=0, eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
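# Illustrative addition (not part of the original module): building a randomly
# initialized REALM embedder from this config (no pretrained weights involved).
#
#     from transformers import RealmEmbedder
#
#     config = RealmConfig(num_hidden_layers=2)
#     embedder = RealmEmbedder(config)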
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
_UpperCAmelCase = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
_UpperCAmelCase = CLIPTextModel(UpperCamelCase_ )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert("RGB" )
if str(UpperCamelCase_ ).startswith("mps" ):
_UpperCAmelCase = torch.manual_seed(UpperCamelCase_ )
else:
_UpperCAmelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
_UpperCAmelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_UpperCAmelCase = self.get_dummy_inputs(UpperCamelCase_ )
_UpperCAmelCase = sd_pipe(**UpperCamelCase_ ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
_UpperCAmelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_UpperCAmelCase = self.get_dummy_inputs(UpperCamelCase_ )
_UpperCAmelCase = 'french fries'
_UpperCAmelCase = sd_pipe(**UpperCamelCase_ , negative_prompt=UpperCamelCase_ )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
_UpperCAmelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_UpperCAmelCase = self.get_dummy_inputs(UpperCamelCase_ )
_UpperCAmelCase = [inputs['prompt']] * 2
_UpperCAmelCase = np.array(inputs["image"] ).astype(np.floataa ) / 255.0
_UpperCAmelCase = torch.from_numpy(UpperCamelCase_ ).unsqueeze(0 ).to(UpperCamelCase_ )
_UpperCAmelCase = image / 2 + 0.5
_UpperCAmelCase = image.permute(0 , 3 , 1 , 2 )
_UpperCAmelCase = image.repeat(2 , 1 , 1 , 1 )
_UpperCAmelCase = sd_pipe(**UpperCamelCase_ ).images
_UpperCAmelCase = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
_UpperCAmelCase = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase__ ( self : Any ):
_UpperCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" )
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
_UpperCAmelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_UpperCAmelCase = self.get_dummy_inputs(UpperCamelCase_ )
_UpperCAmelCase = sd_pipe(**UpperCamelCase_ ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = [round(UpperCamelCase_ , 4 ) for x in image_slice.flatten().tolist()]
print(",".join([str(UpperCamelCase_ ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase__ ( self : Optional[int] ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
_UpperCAmelCase = VaeImageProcessor(do_resize=UpperCamelCase_ , do_normalize=UpperCamelCase_ )
_UpperCAmelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_UpperCAmelCase = pipe(**self.get_dummy_inputs_by_type(UpperCamelCase_ , input_image_type="pt" ) )[0]
_UpperCAmelCase = components['vae']
_UpperCAmelCase = self.get_dummy_inputs_by_type(UpperCamelCase_ , input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
_UpperCAmelCase = vae.encode(inputs[image_param] ).latent_dist.mode()
_UpperCAmelCase = pipe(**UpperCamelCase_ )[0]
_UpperCAmelCase = np.abs(out - out_latents_inputs ).max()
self.assertLess(UpperCamelCase_ , 1e-4 , "passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
_UpperCAmelCase = torch.manual_seed(UpperCamelCase_ )
_UpperCAmelCase = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
_UpperCAmelCase = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**UpperCamelCase_ ).images
_UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCAmelCase__ ( self : Any ):
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=UpperCamelCase_ )
_UpperCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**UpperCamelCase_ ).images
_UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=UpperCamelCase_ )
_UpperCAmelCase = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**UpperCamelCase_ ).images
_UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = 0
        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
_UpperCAmelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_UpperCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_UpperCAmelCase = latents[0, -3:, -3:, -1]
_UpperCAmelCase = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
_UpperCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_UpperCAmelCase = latents[0, -3:, -3:, -1]
_UpperCAmelCase = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
_UpperCAmelCase = False
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=UpperCamelCase_ , torch_dtype=torch.floataa )
_UpperCAmelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
_UpperCAmelCase = self.get_inputs()
pipe(**UpperCamelCase_ , callback=UpperCamelCase_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCAmelCase__ ( self : Dict ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=UpperCamelCase_ , torch_dtype=torch.floataa )
_UpperCAmelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = self.get_inputs()
_UpperCAmelCase = pipe(**UpperCamelCase_ )
_UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
_UpperCAmelCase = inputs['image'].resize((504, 504) )
_UpperCAmelCase = 'timbrooks/instruct-pix2pix'
_UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
_UpperCAmelCase = pipe(**UpperCamelCase_ )
_UpperCAmelCase = output.images[0]
_UpperCAmelCase = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
_UpperCAmelCase = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
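# Illustrative addition (not part of the test module): a minimal inference sketch
# for the pipeline exercised above. The import name used in this file,
# `StableDiffusionInstructPixaPixPipeline`, corresponds to diffusers'
# `StableDiffusionInstructPix2PixPipeline`. `input_image` is any PIL image, and
# running this downloads the checkpoint from the Hub.
#
#     pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained("timbrooks/instruct-pix2pix")
#     pipe = pipe.to("cuda")
#     edited = pipe("turn him into a cyborg", image=input_image).images[0]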
"""The rail fence (zigzag) cipher: encryption, decryption, and brute force."""


def encrypt(input_string: str, key: int) -> str:
    """
    Shuffles the characters of a string by placing each of them in a grid
    (whose height depends on the key) in a zigzag formation and reading it
    left to right.

    >>> encrypt("Hello World", 4)
    'HWe olordll'
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """
    Generates a template based on the key, fills it in with the characters of
    the input string, and then reads it in a zigzag formation.

    >>> decrypt("HWe olordll", 4)
    'Hello World'
    """
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Uses the decrypt function by guessing every key.

    >>> bruteforce("HWe olordll")[4]
    'Hello World'
    """
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
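# Illustrative addition (not in the original module): a round-trip property that
# holds for any valid key, complementing the doctests above.
def demo_round_trip(message: str = "Hello World", key: int = 4) -> bool:
    return decrypt(encrypt(message, key), key) == message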
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedfileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedfileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedfileFileSystem):
    """Read contents of a gzip file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedfileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedfileFileSystem):
    """Read contents of an XZ file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedfileFileSystem):
    """Read contents of a Zstandard file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
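# Illustrative addition (not part of the original module): once these filesystems
# are registered with fsspec (e.g. via fsspec.register_implementation, which the
# datasets library does on import), a compressed file can be read through
# fsspec's URL-chaining syntax:
#
#     import fsspec
#
#     with fsspec.open("gzip://data.txt::./data.txt.gz", mode="rb") as f:
#         content = f.read()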
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
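# Illustrative addition (not part of the original example): the decorator in
# isolation. `find_executable_batch_size` re-runs the wrapped function with a
# halved batch size each time a CUDA out-of-memory error escapes it.
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def run(batch_size):
#         ...  # build dataloaders/model with `batch_size` and train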
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
lowercase = 1
lowercase = 3
lowercase = (32, 32)
lowercase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(snake_case )
return image
@property
    def dummy_cond_unet_upscale(self):
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=snake_case , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
    def dummy_vae(self):
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder(self):
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
return CLIPTextModel(snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase = self.dummy_cond_unet_upscale
lowercase = DDPMScheduler()
lowercase = DDIMScheduler(prediction_type='v_prediction' )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowercase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase = Image.fromarray(np.uinta(snake_case ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
lowercase = StableDiffusionUpscalePipeline(
unet=snake_case , low_res_scheduler=snake_case , scheduler=snake_case , vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , max_noise_level=350 , )
lowercase = sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
lowercase = 'A painting of a squirrel eating a burger'
lowercase = torch.Generator(device=snake_case ).manual_seed(0 )
lowercase = sd_pipe(
[prompt] , image=snake_case , generator=snake_case , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
lowercase = output.images
lowercase = torch.Generator(device=snake_case ).manual_seed(0 )
lowercase = sd_pipe(
[prompt] , image=snake_case , generator=snake_case , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=snake_case , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
lowercase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
lowercase = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase = self.dummy_cond_unet_upscale
lowercase = DDPMScheduler()
lowercase = DDIMScheduler(prediction_type='v_prediction' )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowercase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase = Image.fromarray(np.uinta(snake_case ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
lowercase = StableDiffusionUpscalePipeline(
unet=snake_case , low_res_scheduler=snake_case , scheduler=snake_case , vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , max_noise_level=350 , )
lowercase = sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
lowercase = 'A painting of a squirrel eating a burger'
lowercase = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
lowercase = output.images
assert image.shape[0] == 2
lowercase = torch.Generator(device=snake_case ).manual_seed(0 )
lowercase = sd_pipe(
[prompt] , image=snake_case , generator=snake_case , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
lowercase = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.dummy_cond_unet_upscale
lowercase = DDPMScheduler()
lowercase = DDIMScheduler(prediction_type='v_prediction' )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowercase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase = Image.fromarray(np.uinta(snake_case ) ).convert('RGB' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
lowercase = unet.half()
lowercase = text_encoder.half()
# make sure here that pndm scheduler skips prk
lowercase = StableDiffusionUpscalePipeline(
unet=snake_case , low_res_scheduler=snake_case , scheduler=snake_case , vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , max_noise_level=350 , )
lowercase = sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
lowercase = 'A painting of a squirrel eating a burger'
lowercase = torch.manual_seed(0 )
lowercase = sd_pipe(
[prompt] , image=snake_case , generator=snake_case , num_inference_steps=2 , output_type='np' , ).images
lowercase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
lowercase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
lowercase = 'stabilityai/stable-diffusion-x4-upscaler'
lowercase = StableDiffusionUpscalePipeline.from_pretrained(snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
pipe.enable_attention_slicing()
lowercase = 'a cat sitting on a park bench'
lowercase = torch.manual_seed(0 )
lowercase = pipe(
prompt=snake_case , image=snake_case , generator=snake_case , output_type='np' , )
lowercase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
lowercase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
lowercase = 'stabilityai/stable-diffusion-x4-upscaler'
lowercase = StableDiffusionUpscalePipeline.from_pretrained(
snake_case , torch_dtype=torch.floataa , )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
pipe.enable_attention_slicing()
lowercase = 'a cat sitting on a park bench'
lowercase = torch.manual_seed(0 )
lowercase = pipe(
prompt=snake_case , image=snake_case , generator=snake_case , output_type='np' , )
lowercase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def SCREAMING_SNAKE_CASE__ ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
lowercase = 'stabilityai/stable-diffusion-x4-upscaler'
lowercase = StableDiffusionUpscalePipeline.from_pretrained(
snake_case , torch_dtype=torch.floataa , )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase = 'a cat sitting on a park bench'
lowercase = torch.manual_seed(0 )
lowercase = pipe(
prompt=snake_case , image=snake_case , generator=snake_case , num_inference_steps=5 , output_type='np' , )
lowercase = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
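# Illustrative addition (not part of the test module): a minimal upscaling sketch
# for the pipeline exercised above. `low_res_image` is any small PIL image, and
# running this downloads the checkpoint from the Hub.
#
#     pipe = StableDiffusionUpscalePipeline.from_pretrained("stabilityai/stable-diffusion-x4-upscaler")
#     pipe = pipe.to("cuda")
#     upscaled = pipe(prompt="a white cat", image=low_res_image).images[0]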
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
UpperCamelCase__ : List[Any] = OrderedDict()
for key, value in state_dict.items():
if key.startswith('''module.encoder''' ):
UpperCamelCase__ : Any = key.replace('''module.encoder''' , '''glpn.encoder''' )
if key.startswith('''module.decoder''' ):
UpperCamelCase__ : Optional[Any] = key.replace('''module.decoder''' , '''decoder.stages''' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
UpperCamelCase__ : Union[str, Any] = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
UpperCamelCase__ : Tuple = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(UpperCamelCase__ )-1}''' )
if "norm" in key:
UpperCamelCase__ : Dict = key.replace('''norm''' , '''layer_norm''' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
UpperCamelCase__ : int = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )]
UpperCamelCase__ : Tuple = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(UpperCamelCase__ )-1}''' )
if "layer_norm1" in key:
UpperCamelCase__ : Tuple = key.replace('''layer_norm1''' , '''layer_norm_1''' )
if "layer_norm2" in key:
UpperCamelCase__ : Union[str, Any] = key.replace('''layer_norm2''' , '''layer_norm_2''' )
if "block" in key:
# replace for example block1 by block.0
UpperCamelCase__ : Optional[Any] = key[key.find('''block''' ) + len('''block''' )]
UpperCamelCase__ : str = key.replace(f'''block{idx}''' , f'''block.{int(UpperCamelCase__ )-1}''' )
if "attn.q" in key:
UpperCamelCase__ : Dict = key.replace('''attn.q''' , '''attention.self.query''' )
if "attn.proj" in key:
UpperCamelCase__ : Any = key.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in key:
UpperCamelCase__ : List[Any] = key.replace('''attn''' , '''attention.self''' )
if "fc1" in key:
UpperCamelCase__ : int = key.replace('''fc1''' , '''dense1''' )
if "fc2" in key:
UpperCamelCase__ : str = key.replace('''fc2''' , '''dense2''' )
if "linear_pred" in key:
UpperCamelCase__ : List[str] = key.replace('''linear_pred''' , '''classifier''' )
if "linear_fuse" in key:
UpperCamelCase__ : Union[str, Any] = key.replace('''linear_fuse.conv''' , '''linear_fuse''' )
UpperCamelCase__ : Optional[int] = key.replace('''linear_fuse.bn''' , '''batch_norm''' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
UpperCamelCase__ : str = key[key.find('''linear_c''' ) + len('''linear_c''' )]
UpperCamelCase__ : Tuple = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(UpperCamelCase__ )-1}''' )
if "bot_conv" in key:
UpperCamelCase__ : List[Any] = key.replace('''bot_conv''' , '''0.convolution''' )
if "skip_conv1" in key:
UpperCamelCase__ : str = key.replace('''skip_conv1''' , '''1.convolution''' )
if "skip_conv2" in key:
UpperCamelCase__ : Tuple = key.replace('''skip_conv2''' , '''2.convolution''' )
if "fusion1" in key:
UpperCamelCase__ : List[Any] = key.replace('''fusion1''' , '''1.fusion''' )
if "fusion2" in key:
UpperCamelCase__ : Tuple = key.replace('''fusion2''' , '''2.fusion''' )
if "fusion3" in key:
UpperCamelCase__ : int = key.replace('''fusion3''' , '''3.fusion''' )
if "fusion" in key and "conv" in key:
UpperCamelCase__ : List[str] = key.replace('''conv''' , '''convolutional_layer''' )
if key.startswith('''module.last_layer_depth''' ):
UpperCamelCase__ : Tuple = key.replace('''module.last_layer_depth''' , '''head.head''' )
UpperCamelCase__ : List[str] = value
return new_state_dict
def read_in_k_v(config, state_dict):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
UpperCamelCase__ : Optional[Any] = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
UpperCamelCase__ : int = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
UpperCamelCase__ : Union[str, Any] = kv_weight[
: config.hidden_sizes[i], :
]
UpperCamelCase__ : Tuple = kv_bias[: config.hidden_sizes[i]]
UpperCamelCase__ : Tuple = kv_weight[
config.hidden_sizes[i] :, :
]
UpperCamelCase__ : Optional[Any] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
UpperCamelCase__ : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCamelCase__ : Any = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
UpperCamelCase__ : Any = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] )
# load image processor (only resize + rescale)
UpperCamelCase__ : str = GLPNImageProcessor()
# prepare image
UpperCamelCase__ : List[str] = prepare_img()
UpperCamelCase__ : List[str] = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).pixel_values
logger.info('''Converting model...''' )
# load original state dict
UpperCamelCase__ : Tuple = torch.load(UpperCamelCase__ , map_location=torch.device('''cpu''' ) )
# rename keys
UpperCamelCase__ : Optional[Any] = rename_keys(UpperCamelCase__ )
# key and value matrices need special treatment
read_in_k_v(UpperCamelCase__ , UpperCamelCase__ )
# create HuggingFace model and load state dict
UpperCamelCase__ : str = GLPNForDepthEstimation(UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
# forward pass
UpperCamelCase__ : Union[str, Any] = model(UpperCamelCase__ )
UpperCamelCase__ : Optional[int] = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
UpperCamelCase__ : Tuple = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
UpperCamelCase__ : Union[str, Any] = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
UpperCamelCase__ : Optional[Any] = torch.Size([1, 4_8_0, 6_4_0] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
print('''Looks ok!''' )
# finally, push to hub if required
if push_to_hub:
logger.info('''Pushing model and image processor to the hub...''' )
model.push_to_hub(
repo_path_or_name=Path(UpperCamelCase__ , UpperCamelCase__ ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=UpperCamelCase__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(UpperCamelCase__ , UpperCamelCase__ ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=UpperCamelCase__ , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default=None,
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
parser.add_argument(
"--model_name",
default="glpn-kitti",
type=str,
help="Name of the model in case you're pushing to the hub.",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
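# Illustrative addition (not part of the original script): a typical invocation,
# assuming this file is saved as convert_glpn_to_pytorch.py and using a
# hypothetical local checkpoint path.
#
#     python convert_glpn_to_pytorch.py \
#         --checkpoint_path ./glpn_kitti.pth \
#         --pytorch_dump_folder_path ./glpn-kitti \
#         --model_name glpn-kitti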
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
_A : int = """sew-d"""
    def __init__(
        self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3_072, squeeze_factor=2, max_position_embeddings=512, position_buckets=256,
        share_att_key=True, relative_attention=True, pos_att_type=("p2c", "c2p"), norm_rel_ebd="layer_norm",
        hidden_act="gelu_python", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1,
        feat_proj_dropout=0.0, final_dropout=0.1, initializer_range=0.02, layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16,
        apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean",
        ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256,
        pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
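    # Added illustration (hypothetical, not part of the original config file):
    # the property above is the feature encoder's total downsampling factor,
    # i.e. the product of the conv strides. With the defaults:
    #   functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1) == 320
    # so for 16 kHz audio the model emits one encoder frame per 320 samples (~20 ms).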
| 310 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    """Synthetic y = a * x + b dataset with Gaussian noise (scale 0.1)."""

    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    """Affine model variant with fixed two-element parameters (uses index 0)."""

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    """Plain affine model y = a * x + b with learnable scalar parameters."""

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
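if __name__ == "__main__":
    # Added illustration (hypothetical; not part of the original test utilities):
    # both regression modules above model y = a * x + b, which plain SGD recovers
    # from noiseless data.
    _x = torch.linspace(-1, 1, 64)
    _y = 2 * _x + 3
    _a = torch.zeros(1, requires_grad=True)
    _b = torch.zeros(1, requires_grad=True)
    _opt = torch.optim.SGD([_a, _b], lr=0.1)
    for _ in range(200):
        _opt.zero_grad()
        ((_a * _x + _b - _y) ** 2).mean().backward()
        _opt.step()
    assert torch.allclose(_a, torch.tensor([2.0]), atol=1e-2)
    assert torch.allclose(_b, torch.tensor([3.0]), atol=1e-2)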
def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
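# Hedged usage sketch (illustrative; assumes the MRPC CSV fixtures referenced
# above exist and an accelerate Accelerator is available):
# from accelerate import Accelerator
# train_dl, eval_dl = mocked_dataloaders(Accelerator(), batch_size=16)
# batch = next(iter(train_dl))  # dict with input_ids / attention_mask / labels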
| 310 | 1 |
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Read an init file and parse (per backend) the `_import_structure` objects
    defined and the `TYPE_CHECKING` objects defined."""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
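# Added example (illustrative): mismatched halves produce readable errors.
# analyze_results({"none": ["A", "B"]}, {"none": ["A"]})
# -> ["Differences for base imports:", "  B in _import_structure but not in TYPE_HINT."]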
def check_all_inits():
    """Check all inits in the repo and raise an error if at least one is broken."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Check all submodules are properly registered in the main init."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
| 370 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints where one sequence is a complete subset of another.
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
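    # Note (added, hedged): a DisjunctiveConstraint is fulfilled as soon as ANY
    # of its candidate token sequences is generated, which is why updating with
    # 1, 2, 5 above completes [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]].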
| 370 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue implemented on a fixed ring of linked nodes."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # Close the ring: last node points back to the front.
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
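    # Added usage demo (illustrative): a two-slot ring queue.
    queue = CircularQueueLinkedList(2)
    queue.enqueue("a")
    queue.enqueue("b")
    assert queue.dequeue() == "a"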
| 703 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
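# Note (added, hedged): this is the standard transformers lazy-init pattern --
# importing the package only builds `_import_structure`; the heavy submodules
# load on first attribute access through `_LazyModule`, and the try/except
# blocks simply drop entries whose optional backend (tokenizers, torch, tf,
# flax) is not installed.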
| 327 | 0 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    """Add three fractions and reduce the result to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """Sum the numerators and denominators of all unique reduced fractions
    produced by the four exponent cases (n = 1, 2, -1, -2)."""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
| 51 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
lowerCAmelCase : int ={
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self, image_size=64, patch_size=1, num_channels=3, embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6], num_heads=[6, 6, 6, 6, 6, 6], window_size=8,
        mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu",
        use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5,
        upscale=2, img_range=1.0, resi_connection="1conv", upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
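# Added usage sketch (illustrative): the defaults describe the x2 classical-SR
# variant, e.g. `Swin2SRConfig().upscale == 2`, so a model built from this
# config enlarges 64x64 inputs to 128x128 through the pixelshuffle upsampler.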
| 172 | 0 |
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients in increasing-degree order) at x directly."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method: one pass, no explicit powers."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
_lowerCamelCase = (0.0, 0.0, 5.0, 9.3, 7.0)
_lowerCamelCase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
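    # Added check (illustrative): Horner evaluation agrees with the direct sum
    # up to floating-point rounding.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-6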
| 700 |
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
_DESCRIPTION = '\\nBLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
_KWARGS_DESCRIPTION = '\nBLEURT score.\n\nArgs:\n    `predictions` (list of str): prediction/candidate sentences\n    `references` (list of str): reference sentences\n    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n    \'scores\': List of scores.\nExamples:\n\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> bleurt = datasets.load_metric("bleurt")\n    >>> results = bleurt.compute(predictions=predictions, references=references)\n    >>> print([round(v, 2) for v in results["scores"]])\n    [1.03, 1.04]\n'
CHECKPOINT_URLS = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )
    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            self.config_name = "bleurt-base-128"

        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))
    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 112 | 0 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2_000.0,
                 d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64,
                 d_ff: int = 2048, dropout_rate: float = 0.1):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
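# Shape note (added, hedged): with the defaults above, the decoder maps
# continuous inputs of shape (batch, targets_length, input_dims) = (B, 256, 128)
# plus a scalar noise time in [0, 1) to a (B, 256, 128) spectrogram prediction,
# with FiLM conditioning derived from a 4 * d_model timestep embedding.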
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate))
        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon))
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None,
                encoder_attention_mask=None, encoder_decoder_position_bias=None):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype)
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask)
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1))
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
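if __name__ == "__main__":
    # Added sanity check (illustrative): the RMS norm above only rescales, so a
    # constant vector normalizes to (approximately) all ones before the learned
    # weight is applied.
    _h = torch.full((1, 4), 3.0)
    _n = _h * torch.rsqrt(_h.pow(2).mean(-1, keepdim=True) + 1e-6)
    assert torch.allclose(_n, torch.ones_like(_n), atol=1e-3)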
class NewGELUActivation(nn.Module):
    """Tanh-based GELU approximation (as used in Google BERT / GPT-2)."""

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044_715 * torch.pow(input, 3.0))))
class T5FiLMLayer(nn.Module):
    """FiLM layer: predicts a per-feature scale and shift from the conditioning embedding."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
| 16 |
'''simple docstring'''
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Naive top-down recursion over all ordered combinations."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Top-down recursion with memoization in dp_array."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Bottom-up DP; dp_array[i] counts ordered ways to reach sum i."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase =3
__UpperCAmelCase =5
__UpperCAmelCase =[1, 2, 5]
print(combination_sum_iv(n, array, target))
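    # Added check (illustrative): all three implementations agree -- there are
    # 9 ordered ways to write 5 as a sum of elements of [1, 2, 5].
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
        == 9
    )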
| 546 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")
class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """LIFO stack backed by a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
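    # Added usage demo (illustrative): LIFO order.
    stack = LinkedStack[int]()
    stack.push(1)
    stack.push(2)
    assert stack.pop() == 2
    assert stack.peek() == 1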
| 708 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
UpperCamelCase__ = parser.parse_args()
main(args)
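# Example invocation (added, illustrative; the script name is an assumption --
# in the movement-pruning examples this file is typically called bertarize.py):
# python bertarize.py --pruning_method topK --threshold 0.10 \
#     --model_name_or_path ./serialization_dir/fine_pruned_model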
| 312 | 0 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
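# A quick self-check of the gradient-accumulation arithmetic used in `training_function` above
# (an illustrative sketch; the helper name and the sample numbers are assumptions, not part of
# the original script):
def _split_batch_size(requested_batch_size, max_gpu_batch_size=MAX_GPU_BATCH_SIZE):
    """Return (per-step batch size, accumulation steps) keeping the effective batch size fixed."""
    if requested_batch_size <= max_gpu_batch_size:
        return requested_batch_size, 1
    return max_gpu_batch_size, requested_batch_size // max_gpu_batch_size


assert _split_batch_size(64) == (16, 4)  # four micro-steps of 16 reproduce an effective batch of 64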
if __name__ == "__main__":
main()
| 615 |
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo define the same objects in their `_import_structure` and `TYPE_CHECKING` halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Check every submodule is registered in the main init of Transformers."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 456 | 0 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]
def mask_to_test_readable(mask):
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_444},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_167},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_132},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_053},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9_967},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9_909},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9_879},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9_834},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9_716},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9_612},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9_599},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9_552},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9_532},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9_516},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9_499},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9_483},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9_464},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9_408},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9_335},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9_326},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9_262},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8_999},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8_986},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8_984},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8_873},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8_871}
] , )
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_444},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_210},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_167},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_132},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_053},
] , )
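def _example_mask_generation():
    """Illustrative usage of the pipeline exercised above, not part of the test suite; assumes
    network access and enough memory to load the SAM checkpoint."""
    generator = pipeline("mask-generation", model="facebook/sam-vit-huge")
    outputs = generator("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
    # `outputs` holds parallel lists: binary masks and their predicted IoU scores.
    return [
        {"mask": mask_to_test_readable(mask), "scores": outputs["scores"][i]}
        for i, mask in enumerate(outputs["masks"])
    ]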
| 629 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
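# Sketch of the channel rounding that `depth_divisible_by`/`min_depth` control (the helper below
# reproduces the standard MobileNet "make divisible" rule for illustration; it is not imported
# from the modeling code):
def _example_depth_rounding():
    config = MobileNetV2Config(depth_multiplier=1.4)

    def make_divisible(value, divisor, min_value):
        new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
        # Make sure that rounding down does not drop below 90% of the original value.
        if new_value < 0.9 * value:
            new_value += divisor
        return new_value

    # e.g. a nominal 32-channel layer scaled by the multiplier, then rounded to a multiple of 8
    return make_divisible(32 * config.depth_multiplier, config.depth_divisible_by, config.min_depth)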
| 629 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
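# Effect of the pattern above, sketched (illustrative): `_LazyModule` replaces this module in
# `sys.modules`, so the heavy `modeling_nezha` import (and therefore torch) is deferred until an
# attribute is actually accessed. The TYPE_CHECKING branch mirrors `_import_structure` purely for
# static type checkers; it never runs at runtime.
#
#   import transformers.models.nezha as nezha  # cheap: no torch import yet
#   model_cls = nezha.NezhaModel               # triggers the real import of modeling_nezha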
| 283 |
import unittest

from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
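# A standalone sketch of driving one of the blocks above outside the test harness. The
# constructor and forward signatures used here are assumptions inferred from how the mixin
# exercises the blocks, so treat this as illustrative rather than canonical:
def _example_down_block():
    import torch

    block = DownBlock2D(in_channels=32, out_channels=32, temb_channels=128)  # noqa: F405
    sample = torch.randn(4, 32, 32, 32)  # (batch, channels, height, width)
    temb = torch.randn(4, 128)  # time embedding
    hidden_states, output_states = block(sample, temb)
    return hidden_states.shape  # spatially halved by the block's downsampler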
| 283 | 1 |
"""simple docstring"""
def min_path_sum(grid: list) -> int:
    """Find the path from the top left to the bottom right of a grid of numbers with the
    lowest possible sum, and return that sum."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
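    # Illustrative example (the grid is the classic "Minimum Path Sum" instance): the cheapest
    # top-left -> bottom-right path 1 -> 3 -> 1 -> 1 -> 1 costs 7. Note that min_path_sum
    # mutates its argument, so pass a copy if the original grid is still needed.
    print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7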
| 716 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
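# Illustrative direct use of the helpers tested above (a sketch; the first call needs network
# access, subsequent calls resolve from the local cache):
def _example_cached_file():
    config_path = cached_file(RANDOM_BERT, CONFIG_NAME)
    with open(config_path) as f:
        config = json.load(f)
    return config  # the tiny random BERT config, served from the HF cache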
| 190 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on

    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)

        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
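# LayoutLM expects bounding boxes on a 0-1000 scale relative to page size; the tests above sample
# legal boxes directly. A sketch of the usual normalization applied to real OCR boxes (the helper
# name is illustrative):
def _normalize_box(box, page_width, page_height):
    x0, y0, x1, y1 = box
    return [
        int(1000 * x0 / page_width),
        int(1000 * y0 / page_height),
        int(1000 * x1 / page_width),
        int(1000 * y1 / page_height),
    ]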
| 4 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
device = torch_device
pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
output = pipe(generator=generator , num_inference_steps=1_00 , audio_length_in_s=4.096 )
audio = output.audios
audio_slice = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
expected_slice = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
"""simple docstring"""
device = torch_device
pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.float16 )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
output = pipe(generator=generator , num_inference_steps=1_00 , audio_length_in_s=4.096 )
audio = output.audios
audio_slice = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
expected_slice = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
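# A hedged end-to-end sketch mirroring the slow test above; the checkpoint id
# and the 4.096 s audio length come from the test, device handling is an assumption:
#
#   import torch
#   from diffusers import DanceDiffusionPipeline
#
#   pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ).to('cuda' )
#   generator = torch.manual_seed(0 )
#   audio = pipe(generator=generator , num_inference_steps=1_00 , audio_length_in_s=4.096 ).audios[0]
#   # `audio` is a (channels, samples) array; write it out with e.g. scipy.io.wavfile.write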
| 4 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
rename_keys_prefix = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
ACCEPTABLE_CHECKPOINTS = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def load_state_dict( checkpoint_path ):
"""simple docstring"""
sd = torch.load(checkpoint_path , map_location='''cpu''' )
return sd
def get_new_dict( d , config , rename_keys_prefix=rename_keys_prefix ):
"""simple docstring"""
new_d = OrderedDict()
new_d['''visual_bert.embeddings.position_ids'''] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
new_key = key
for name_pair in rename_keys_prefix:
new_key = new_key.replace(name_pair[0] , name_pair[1] )
new_d[new_key] = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
new_d['''cls.predictions.decoder.bias'''] = new_d['''cls.predictions.bias''']
return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint( checkpoint_path , pytorch_dump_folder_path ):
"""simple docstring"""
assert (
checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
model_type = '''pretraining'''
if "vcr" in checkpoint_path:
config_params = {'''visual_embedding_dim''': 5_1_2}
elif "vqa_advanced" in checkpoint_path:
config_params = {'''visual_embedding_dim''': 2_0_4_8}
elif "vqa" in checkpoint_path:
config_params = {'''visual_embedding_dim''': 2_0_4_8}
elif "nlvr" in checkpoint_path:
config_params = {'''visual_embedding_dim''': 1_0_2_4}
else:
raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
config_params = {'''visual_embedding_dim''': 5_1_2}
model_type = '''multichoice'''
elif "vqa_advanced" in checkpoint_path:
config_params = {'''visual_embedding_dim''': 2_0_4_8}
model_type = '''vqa_advanced'''
elif "vqa" in checkpoint_path:
config_params = {'''visual_embedding_dim''': 2_0_4_8, '''num_labels''': 3_1_2_9}
model_type = '''vqa'''
elif "nlvr" in checkpoint_path:
config_params = {
'''visual_embedding_dim''': 1_0_2_4,
'''num_labels''': 2,
}
model_type = '''nlvr'''
config = VisualBertConfig(**config_params )
# Load State Dict
state_dict = load_state_dict(checkpoint_path )
new_state_dict = get_new_dict(state_dict , config )
if model_type == "pretraining":
model = VisualBertForPreTraining(config )
elif model_type == "vqa":
model = VisualBertForQuestionAnswering(config )
elif model_type == "nlvr":
model = VisualBertForVisualReasoning(config )
elif model_type == "multichoice":
model = VisualBertForMultipleChoice(config )
model.load_state_dict(new_state_dict )
# Save Checkpoints
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
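# Hedged usage note: run from a shell, the converter takes the original .th
# checkpoint and an output folder (the script name and paths are illustrative):
#
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       vqa_pre_trained.th ./visual_bert_vqa
#
# The checkpoint filename must appear in ACCEPTABLE_CHECKPOINTS so the matching
# config and model head (pretraining / vqa / nlvr / multichoice) is selected.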
| 703 |
from ...processing_utils import ProcessorMixin
class lowerCamelCase_ ( lowerCamelCase ):
a__ = '''WhisperFeatureExtractor'''
a__ = '''WhisperTokenizer'''
def __init__( self , feature_extractor , tokenizer ):
"""simple docstring"""
super().__init__(feature_extractor , tokenizer )
self.current_processor = self.feature_extractor
self._in_target_context_manager = False
def A ( self , task=None , language=None , no_timestamps=True ):
"""simple docstring"""
return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps )
def __call__( self , *args , **kwargs ):
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*args , **kwargs )
audio = kwargs.pop('''audio''' , None )
sampling_rate = kwargs.pop('''sampling_rate''' , None )
text = kwargs.pop('''text''' , None )
if len(args ) > 0:
audio = args[0]
args = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
if text is not None:
encodings = self.tokenizer(text , **kwargs )
if text is None:
return inputs
elif audio is None:
return encodings
else:
inputs['''labels'''] = encodings['''input_ids''']
return inputs
def A ( self , *args , **kwargs ):
"""simple docstring"""
return self.tokenizer.batch_decode(*args , **kwargs )
def A ( self , *args , **kwargs ):
"""simple docstring"""
return self.tokenizer.decode(*args , **kwargs )
def A ( self , text , return_tensors="np" ):
"""simple docstring"""
return self.tokenizer.get_prompt_ids(text , return_tensors=return_tensors )
| 180 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_mluke'''] = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 95 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCamelCase_ (__A ):
def __init__( self : List[Any] , transformer : TransformeraDModel , vae : AutoencoderKL , scheduler : KarrasDiffusionSchedulers , idalabel : Optional[Dict[int, str]] = None , ) -> List[Any]:
super().__init__()
self.register_modules(transformer=transformer , vae=vae , scheduler=scheduler )
# create an imagenet label -> id dictionary for easier use
self.labels = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split("," ):
self.labels[label.lstrip().rstrip()] = int(key )
self.labels = dict(sorted(self.labels.items() ) )
def get_label_ids( self : str , label : Union[str, List[str]] ) -> List[int]:
if not isinstance(label , list ):
label = list(label )
for l in label:
if l not in self.labels:
raise ValueError(
f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : List[str] , class_labels : List[int] , guidance_scale : float = 4.0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , num_inference_steps : int = 50 , output_type : Optional[str] = "pil" , return_dict : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
batch_size = len(class_labels )
latent_size = self.transformer.config.sample_size
latent_channels = self.transformer.config.in_channels
latents = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=generator , device=self.device , dtype=self.transformer.dtype , )
latent_model_input = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
class_labels = torch.tensor(class_labels , device=self.device ).reshape(-1 )
class_null = torch.tensor([1_000] * batch_size , device=self.device )
class_labels_input = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(num_inference_steps )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
half = latent_model_input[: len(latent_model_input ) // 2]
latent_model_input = torch.cat([half, half] , dim=0 )
latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
timesteps = t
if not torch.is_tensor(timesteps ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
is_mps = latent_model_input.device.type == "mps"
if isinstance(timesteps , float ):
dtype = torch.float32 if is_mps else torch.float64
else:
dtype = torch.int32 if is_mps else torch.int64
timesteps = torch.tensor([timesteps] , dtype=dtype , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
timesteps = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
noise_pred = self.transformer(
latent_model_input , timestep=timesteps , class_labels=class_labels_input ).sample
# perform guidance
if guidance_scale > 1:
eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
cond_eps, uncond_eps = torch.split(eps , len(eps ) // 2 , dim=0 )
half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
eps = torch.cat([half_eps, half_eps] , dim=0 )
noise_pred = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
model_output, _ = torch.split(noise_pred , latent_channels , dim=1 )
else:
model_output = noise_pred
# compute previous image: x_t -> x_t-1
latent_model_input = self.scheduler.step(model_output , t , latent_model_input ).prev_sample
if guidance_scale > 1:
latents, _ = latent_model_input.chunk(2 , dim=0 )
else:
latents = latent_model_input
latents = 1 / self.vae.config.scaling_factor * latents
samples = self.vae.decode(latents ).sample
samples = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
samples = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
samples = self.numpy_to_pil(samples )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=samples )
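# A hedged usage sketch for the class-conditional pipeline above; the
# checkpoint id and label strings are assumptions, the call signature is the
# one defined here:
#
#   import torch
#   from diffusers import DiTPipeline
#
#   pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' , torch_dtype=torch.float16 ).to('cuda' )
#   class_ids = pipe.get_label_ids(['white shark', 'umbrella'] )
#   generator = torch.manual_seed(33 )
#   images = pipe(class_labels=class_ids , num_inference_steps=25 , generator=generator ).images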
| 95 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ) -> Any:
'''simple docstring'''
extra_kw = {"""add_prefix_space""": True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(""" """ ) else {}
tokenizer.padding_side = padding_side
return tokenizer(
[line] , max_length=max_length , padding="""max_length""" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch( input_ids , pad_token_id , attention_mask=None , ) -> List[Any]:
'''simple docstring'''
keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCamelCase ( __snake_case ):
def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ) -> Optional[int]:
super().__init__()
self.src_file = Path(data_dir ).joinpath(type_path + """.source""" )
self.tgt_file = Path(data_dir ).joinpath(type_path + """.target""" )
self.src_lens = self.get_char_lens(self.src_file )
self.max_source_length = max_source_length
self.max_target_length = max_target_length
assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
self.tokenizer = tokenizer
self.prefix = prefix
if n_obs is not None:
self.src_lens = self.src_lens[:n_obs]
self.src_lang = src_lang
self.tgt_lang = tgt_lang
def __len__( self ) -> Any:
return len(self.src_lens )
def __getitem__( self , index ) -> Dict[str, torch.Tensor]:
index = index + 1 # linecache starts at 1
source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip("""\n""" )
tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip("""\n""" )
assert source_line, f'''empty source line for index {index}'''
assert tgt_line, f'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , TaTokenizer ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
source_tokenizer = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
)
target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , """right""" )
target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , """right""" )
source_ids = source_inputs["""input_ids"""].squeeze()
target_ids = target_inputs["""input_ids"""].squeeze()
src_mask = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def get_char_lens( data_file ) -> Dict:
return [len(x ) for x in Path(data_file ).open().readlines()]
def collate_fn( self , batch ) -> Dict[str, torch.Tensor]:
input_ids = torch.stack([x["""input_ids"""] for x in batch] )
masks = torch.stack([x["""attention_mask"""] for x in batch] )
target_ids = torch.stack([x["""decoder_input_ids"""] for x in batch] )
tgt_pad_token_id = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , RagTokenizer )
else self.tokenizer.pad_token_id
)
src_pad_token_id = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , RagTokenizer )
else self.tokenizer.pad_token_id
)
y = trim_batch(target_ids , tgt_pad_token_id )
source_ids, source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
batch = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
logger = getLogger(__name__)
def UpperCamelCase( lowercase_ ) -> Tuple:
'''simple docstring'''
return list(itertools.chain.from_iterable(lowercase_ ) )
def save_git_info( folder_path ) -> None:
'''simple docstring'''
repo_infos = get_git_info()
save_json(repo_infos , os.path.join(folder_path , """git_log.json""" ) )
def save_json( content , path , indent=4 , **json_dump_kwargs ) -> str:
'''simple docstring'''
with open(path , """w""" ) as f:
json.dump(content , f , indent=indent , **json_dump_kwargs )
def UpperCamelCase( lowercase_ ) -> Any:
'''simple docstring'''
with open(lowercase_ ) as f:
return json.load(lowercase_ )
def get_git_info() -> int:
'''simple docstring'''
repo = git.Repo(search_parent_directories=True )
repo_infos = {
"""repo_id""": str(repo ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
"""hostname""": str(socket.gethostname() ),
}
return repo_infos
def lmap( f , x ) -> List:
'''simple docstring'''
return list(map(f , x ) )
def pickle_save( obj , path ) -> List[str]:
'''simple docstring'''
with open(path , """wb""" ) as f:
return pickle.dump(obj , f )
def normalize_answer( s ) -> Union[str, Any]:
'''simple docstring'''
def remove_articles(text ):
return re.sub(r"""\b(a|an|the)\b""" , """ """ , text )
def white_space_fix(text ):
return " ".join(text.split() )
def remove_punc(text ):
exclude = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(text ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score( prediction , ground_truth ) -> Optional[Any]:
'''simple docstring'''
prediction_tokens = normalize_answer(prediction ).split()
ground_truth_tokens = normalize_answer(ground_truth ).split()
common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
num_same = sum(common.values() )
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens )
recall = 1.0 * num_same / len(ground_truth_tokens )
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score( prediction , ground_truth ) -> List[Any]:
'''simple docstring'''
return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match( output_lns , reference_lns ) -> Dict:
'''simple docstring'''
assert len(output_lns ) == len(reference_lns )
em = 0
for hypo, pred in zip(output_lns , reference_lns ):
em += exact_match_score(hypo , pred )
if len(output_lns ) > 0:
em /= len(output_lns )
return {"em": em}
def is_rag_model( model_prefix ) -> List[Any]:
'''simple docstring'''
return model_prefix.startswith("""rag""" )
def set_extra_model_params( extra_params , hparams , config ) -> List[str]:
'''simple docstring'''
equivalent_param = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
equivalent_param["""dropout"""] = """dropout_rate"""
for p in extra_params:
if getattr(hparams , p , None ):
if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
logger.info("""config doesn't have a `{}` attribute""".format(p ) )
delattr(hparams , p )
continue
set_p = p if hasattr(config , p ) else equivalent_param[p]
setattr(config , set_p , getattr(hparams , p ) )
delattr(hparams , p )
return hparams, config
| 161 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __lowerCamelCase ( __snake_case ):
lowerCamelCase_ : Tuple = 'vit_msn'
def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , **kwargs , ) -> Optional[int]:
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
| 161 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ : str = logging.get_logger(__name__)
snake_case__ : List[str] = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = """xlm-roberta"""
def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> List[str]:
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCamelCase_ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase_ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
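# Assuming these are the (renamed) XLMRobertaConfig / XLMRobertaOnnxConfig, the
# dynamic ONNX axes above can be inspected like so:
#
#   from transformers import XLMRobertaConfig
#   from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaOnnxConfig
#
#   onnx_config = XLMRobertaOnnxConfig(XLMRobertaConfig() , task='default' )
#   print(onnx_config.inputs )  # OrderedDict with batch/sequence dynamic axes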
| 23 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class _lowercase :
def __init__( self ):
snake_case__ : List[str] =""""""
snake_case__ : List[Any] =""""""
snake_case__ : Optional[int] =[]
snake_case__ : Tuple =0
snake_case__ : Optional[Any] =2_5_6
snake_case__ : Optional[Any] =0
snake_case__ : str =0
snake_case__ : Any =0
snake_case__ : Dict =0
def lowercase__ ( self , a ):
snake_case__ : List[str] =cva.imread(a , 0 )
snake_case__ : Optional[Any] =copy.deepcopy(self.img )
snake_case__ , snake_case__ , snake_case__ : Any =plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="""x""" )
snake_case__ : Tuple =np.sum(a )
for i in range(len(a ) ):
snake_case__ : Union[str, Any] =x[i] / self.k
self.sk += prk
snake_case__ : Tuple =(self.L - 1) * self.sk
if self.rem != 0:
snake_case__ : int =int(last % last )
snake_case__ : List[Any] =int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(a )
snake_case__ : List[Any] =int(np.ma.count(self.img ) / self.img[1].size )
snake_case__ : Union[str, Any] =self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
snake_case__ : Optional[int] =self.img[j][i]
if num != self.last_list[num]:
snake_case__ : Any =self.last_list[num]
cva.imwrite("""output_data/output.jpg""" , self.img )
def lowercase__ ( self ):
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
def lowercase__ ( self ):
cva.imshow("""Output-Image""" , self.img )
cva.imshow("""Input-Image""" , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
__lowerCamelCase : Optional[Any] = os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
__lowerCamelCase : List[Any] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
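# The stretch above is histogram equalization: for each gray level the class
# accumulates p(level) into sk and maps the level to round((L - 1) * sk). A
# minimal numpy sketch of the same lookup (illustrative, not the class above):
#
#   import numpy as np
#   hist = np.bincount(img.ravel() , minlength=256 )
#   cdf = np.cumsum(hist ) / img.size
#   lut = np.rint(255 * cdf ).astype(np.uint8 )
#   equalized = lut[img]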
| 385 | 0 |
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class UpperCAmelCase_ ( unittest.TestCase ):
def __init__( self , parent):
self.parent = parent
def snake_case__ ( self):
return {}
def UpperCamelCase_ ( ):
"""simple docstring"""
snake_case_ : Union[str, Any] = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"
snake_case_ : Optional[int] = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "
return [html_string_a, html_string_a]
@require_bsa
class UpperCAmelCase_ ( snake_case__ , unittest.TestCase ):
UpperCAmelCase_ = MarkupLMFeatureExtractor if is_bsa_available() else None
def snake_case__ ( self):
self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)
@property
def snake_case__ ( self):
return self.feature_extract_tester.prepare_feat_extract_dict()
def snake_case__ ( self):
snake_case_ : Union[str, Any] = self.feature_extraction_class()
# Test not batched input
snake_case_ : Optional[int] = get_html_strings()[0]
snake_case_ : int = feature_extractor(lowercase_)
# fmt: off
snake_case_ : Tuple = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
snake_case_ : Any = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
# fmt: on
self.assertEqual(encoding.nodes , lowercase_)
self.assertEqual(encoding.xpaths , lowercase_)
# Test batched
snake_case_ : Dict = get_html_strings()
snake_case_ : Optional[int] = feature_extractor(lowercase_)
# fmt: off
snake_case_ : str = expected_nodes + [["My First Heading", "My first paragraph."]]
snake_case_ : List[Any] = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
self.assertEqual(len(encoding.nodes) , 2)
self.assertEqual(len(encoding.xpaths) , 2)
self.assertEqual(encoding.nodes , lowercase_)
self.assertEqual(encoding.xpaths , lowercase_)
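# A hedged sketch of the feature extractor outside the test harness (assumes
# bs4 is installed):
#
#   from transformers import MarkupLMFeatureExtractor
#
#   fe = MarkupLMFeatureExtractor()
#   enc = fe('<html><body><h1>Hello</h1></body></html>' )
#   print(enc.nodes , enc.xpaths )  # [['Hello']] [['/html/body/h1']]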
| 701 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
a_ = logging.get_logger(__name__)
a_ = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( snake_case__ ):
UpperCAmelCase_ = """longformer"""
def __init__( self , attention_window = 5_12 , sep_token_id = 2 , pad_token_id = 1 , bos_token_id = 0 , eos_token_id = 2 , vocab_size = 3_05_22 , hidden_size = 7_68 , num_hidden_layers = 12 , num_attention_heads = 12 , intermediate_size = 30_72 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 5_12 , type_vocab_size = 2 , initializer_range = 0.02 , layer_norm_eps = 1E-12 , onnx_export = False , **kwargs , ):
super().__init__(pad_token_id=pad_token_id , **kwargs)
self.attention_window = attention_window
self.sep_token_id = sep_token_id
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.onnx_export = onnx_export
class UpperCAmelCase_ ( snake_case__ ):
def __init__( self , lowercase_ , lowercase_ = "default" , lowercase_ = None):
super().__init__(lowercase_ , lowercase_ , lowercase_)
snake_case_ : Dict = True
@property
def snake_case__ ( self):
if self.task == "multiple-choice":
snake_case_ : Tuple = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case_ : int = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
])
@property
def snake_case__ ( self):
outputs = super().outputs
if self.task == "default":
outputs["pooler_output"] = {0: "batch"}
return outputs
@property
def snake_case__ ( self):
return 1E-4
@property
def snake_case__ ( self):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14)
def snake_case__ ( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
inputs = super().generate_dummy_inputs(
preprocessor=tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework)
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
# make every second token global
inputs["global_attention_mask"][:, ::2] = 1
return inputs
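# The dummy-input override above marks every second token as global; in real
# use only a few positions are global. A hedged sketch:
#
#   import torch
#   input_ids = torch.randint(0 , 30_522 , (1, 4_096) )
#   global_attention_mask = torch.zeros_like(input_ids )
#   global_attention_mask[:, 0] = 1  # let the CLS token attend globally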
| 92 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase_ ( __a , unittest.TestCase ):
lowerCAmelCase__ = LayoutLMTokenizer
lowerCAmelCase__ = LayoutLMTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase_ ( self : List[str] , **_A : List[str] ):
'''simple docstring'''
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_A )
def lowercase_ ( self : List[str] , _A : Union[str, Any] ):
'''simple docstring'''
input_text = '''UNwant\u00E9d,running'''
output_text = '''unwanted, running'''
return input_text, output_text
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
tokenizer = self.tokenizer_class(self.vocab_file )
tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
pass
| 75 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'ctrl'
lowerCAmelCase__ = ['past_key_values']
lowerCAmelCase__ = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : List[Any] , vocab_size : Dict=246_534 , n_positions : Optional[Any]=256 , n_embd : Dict=1_280 , dff : List[str]=8_192 , n_layer : Tuple=48 , n_head : Optional[Any]=16 , resid_pdrop : List[Any]=0.1 , embd_pdrop : List[Any]=0.1 , layer_norm_epsilon : List[str]=1e-6 , initializer_range : Optional[int]=0.0_2 , use_cache : Tuple=True , **kwargs : Optional[Any] , ):
'''simple docstring'''
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.dff = dff
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.use_cache = use_cache
super().__init__(**kwargs )
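# Assuming this is the (renamed) CTRLConfig, attribute_map above lets generic
# code read canonical names while the stored fields keep GPT-style names:
#
#   from transformers import CTRLConfig
#
#   config = CTRLConfig(n_embd=512 , n_layer=12 )
#   assert config.hidden_size == 512 and config.num_hidden_layers == 12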
| 75 | 1 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__UpperCAmelCase = TypeVar('T')
class A__ ( Generic[T] ):
"""simple docstring"""
def __init__( self : Union[str, Any] , A_ : T ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = data
_lowerCAmelCase : Node[T] | None = None
def __str__( self : Any ):
'''simple docstring'''
return F'''{self.data}'''
class A__ ( Generic[T] ):
"""simple docstring"""
def __init__( self : List[Any] ):
'''simple docstring'''
_lowerCAmelCase : Node[T] | None = None
def __iter__( self : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase : Any = self.top
while node:
yield node.data
_lowerCAmelCase : Union[str, Any] = node.next
def __str__( self : int ):
'''simple docstring'''
return "->".join([str(A_ ) for item in self] )
def __len__( self : str ):
'''simple docstring'''
return len(tuple(iter(self ) ) )
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.top is None
def __magic_name__ ( self : Optional[int] , A_ : T ):
'''simple docstring'''
_lowerCAmelCase : Any = Node(A_ )
if not self.is_empty():
_lowerCAmelCase : str = self.top
_lowerCAmelCase : Optional[Any] = node
def __magic_name__ ( self : str ):
'''simple docstring'''
if self.is_empty():
raise IndexError("pop from empty stack" )
assert isinstance(self.top , A_ )
_lowerCAmelCase : Optional[int] = self.top
_lowerCAmelCase : Optional[Any] = self.top.next
return pop_node.data
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
if self.is_empty():
raise IndexError("peek from empty stack" )
assert self.top is not None
return self.top.data
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase : Tuple = None
if __name__ == "__main__":
from doctest import testmod
testmod()
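# Assuming the two classes above are the (renamed) Node and Stack, a short
# usage sketch of the linked-list stack:
#
#   stack = Stack[int]()
#   for value in (1, 2, 3):
#       stack.push(value )
#   print(stack )         # 3->2->1
#   print(stack.pop() )   # 3
#   print(stack.peek() )  # 2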
| 503 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_lxmert_fast'] = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_lxmert'] = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_lxmert'] = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 503 | 1 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( A_ , unittest.TestCase ):
UpperCamelCase_ : Optional[int] =GPTSanJapaneseTokenizer
UpperCamelCase_ : Union[str, Any] =False
UpperCamelCase_ : Optional[Any] ={'''do_clean_text''': False, '''add_prefix_space''': False}
def _A (self ):
super().setUp()
# fmt: off
__lowercase= ['ใใ', 'ใใใซ', 'ใซใกใฏ', 'ใฐใใฏ', 'ไธ็,ใบ็', 'ใ', 'ใ', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
__lowercase= {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # ๐
__lowercase= {'unk_token': '<unk>'}
__lowercase= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowercase= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(lowerCAmelCase ) )
def _A (self , **lowerCAmelCase ):
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def _A (self , lowerCAmelCase ):
__lowercase= 'ใใใซใกใฏใไธ็ใ \nใใใฐใใฏใใบ็ใ๐'
__lowercase= 'ใใใซใกใฏใไธ็ใ \nใใใฐใใฏใไธ็ใ๐'
return input_text, output_text
def _A (self , lowerCAmelCase ):
__lowercase, __lowercase= self.get_input_output_texts(lowerCAmelCase )
__lowercase= tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
__lowercase= tokenizer.decode(lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
return text, ids
def _A (self ):
pass # TODO add if relevant
def _A (self ):
pass # TODO add if relevant
def _A (self ):
pass # TODO add if relevant
def _A (self ):
__lowercase= self.get_tokenizer()
# Testing tokenization
__lowercase= 'ใใใซใกใฏใไธ็ใใใใใฐใใฏใใบ็ใ'
__lowercase= ['ใใ', 'ใซใกใฏ', 'ใ', 'ไธ็', 'ใ', '<SP>', 'ใใ', 'ใฐใใฏ', 'ใ', 'ใบ็', 'ใ']
__lowercase= tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
# Testing conversion to ids without special tokens
__lowercase= [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__lowercase= tokenizer.convert_tokens_to_ids(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
# Testing conversion to ids with special tokens
__lowercase= tokens + [tokenizer.unk_token]
__lowercase= [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9]
__lowercase= tokenizer.convert_tokens_to_ids(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def _A (self ):
__lowercase= self.get_tokenizer()
# Testing tokenization
__lowercase= 'ใใใซใกใฏใ<|bagoftoken|>ไธ็ใใใใฐใใฏใ<|bagoftoken|>ใบ็ใ'
__lowercase= 'ใใใซใกใฏใใใใไธ็ใใใใฐใใฏใใใใไธ็ใ'
__lowercase= tokenizer.encode(lowerCAmelCase )
__lowercase= tokenizer.decode(lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
@slow
def _A (self ):
__lowercase= self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
__lowercase= 'ใใใซใกใฏใไธ็ใ'
__lowercase= 'ใใใฐใใฏใใบ็ใ๐'
__lowercase= 'ใใใซใกใฏใไธ็ใใใใฐใใฏใไธ็ใ๐'
__lowercase= tokenizer.encode(prefix_text + input_text )
__lowercase= tokenizer.encode('' , prefix_text=prefix_text + input_text )
__lowercase= tokenizer.encode(lowerCAmelCase , prefix_text=lowerCAmelCase )
__lowercase= tokenizer.decode(lowerCAmelCase )
__lowercase= tokenizer.decode(lowerCAmelCase )
__lowercase= tokenizer.decode(lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
@slow
def _A (self ):
__lowercase= self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
__lowercase= 'ใใใซใกใฏใไธ็ใ'
__lowercase= 'ใใใฐใใฏใใบ็ใ๐'
__lowercase= len(tokenizer.encode(lowerCAmelCase ) ) - 2
__lowercase= len(tokenizer.encode(lowerCAmelCase ) ) - 2
__lowercase= [1] + [0] * (len_prefix + len_text + 1)
__lowercase= [1] * (len_prefix + len_text + 1) + [0]
__lowercase= [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__lowercase= tokenizer(prefix_text + input_text ).token_type_ids
__lowercase= tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
__lowercase= tokenizer(lowerCAmelCase , prefix_text=lowerCAmelCase ).token_type_ids
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
@slow
def _A (self ):
__lowercase= self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
__lowercase= tokenizer.encode('ใใณใใฏ' )
__lowercase= tokenizer.encode('' , prefix_text='ใใณใใฏ' )
__lowercase= tokenizer.encode('ใใฏ' , prefix_text='ใใณ' )
self.assertEqual(tokenizer.decode(lowerCAmelCase ) , tokenizer.decode(lowerCAmelCase ) )
self.assertEqual(tokenizer.decode(lowerCAmelCase ) , tokenizer.decode(lowerCAmelCase ) )
self.assertNotEqual(lowerCAmelCase , lowerCAmelCase )
self.assertNotEqual(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def _A (self ):
__lowercase= self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
__lowercase= [['ๆญฆ็ฐไฟก็', 'ใฏใ'], ['็น็ฐไฟก้ท', 'ใฎ้ไธใฎใ']]
__lowercase= tokenizer(lowerCAmelCase , padding=lowerCAmelCase )
__lowercase= tokenizer.batch_encode_plus(lowerCAmelCase , padding=lowerCAmelCase )
# fmt: off
__lowercase= [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]]
__lowercase= [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__lowercase= [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , lowerCAmelCase )
self.assertListEqual(x_token.token_type_ids , lowerCAmelCase )
self.assertListEqual(x_token.attention_mask , lowerCAmelCase )
self.assertListEqual(x_token_a.input_ids , lowerCAmelCase )
self.assertListEqual(x_token_a.token_type_ids , lowerCAmelCase )
self.assertListEqual(x_token_a.attention_mask , lowerCAmelCase )
def _A (self ):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def _A (self ):
# tokenizer has no padding token
pass
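# A hedged sketch of the prefix-text ("hybrid") encoding exercised above; the
# checkpoint id matches the slow tests:
#
#   from transformers import GPTSanJapaneseTokenizer
#
#   tokenizer = GPTSanJapaneseTokenizer.from_pretrained('Tanrei/GPTSAN-japanese' )
#   enc = tokenizer('ใใฏ' , prefix_text='ใใณ' )
#   # enc.token_type_ids mark prefix tokens with 1 and generated-part tokens with 0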
| 230 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact( artifact_path , targets ) -> Tuple:
'''simple docstring'''
selected_warnings = set()
buffer = []
def parse_line(fp ):
for line in fp:
if isinstance(line , bytes ):
line = line.decode('UTF-8' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(' ' ):
# process a single warning and move it to `selected_warnings`.
if len(buffer ) > 0:
warning = '\n'.join(buffer )
# Only keep the warnings specified in `targets`
if any(F': {x}: ' in warning for x in targets ):
selected_warnings.add(warning )
buffer.clear()
continue
else:
line = line.strip()
buffer.append(line )
if from_gh:
for filename in os.listdir(artifact_path ):
file_path = os.path.join(artifact_path , filename )
if not os.path.isdir(file_path ):
# read the file
if filename != "warnings.txt":
continue
with open(file_path ) as fp:
parse_line(fp )
else:
try:
with zipfile.ZipFile(artifact_path ) as z:
for filename in z.namelist():
if not os.path.isdir(filename ):
# read the file
if filename != "warnings.txt":
continue
with z.open(filename ) as fp:
parse_line(fp )
except Exception:
logger.warning(
f'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
return selected_warnings
def extract_warnings( artifact_dir , targets ) -> List[Any]:
'''simple docstring'''
selected_warnings = set()
paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if (p.endswith('.zip' ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
return selected_warnings
if __name__ == "__main__":
def list_str( values ) -> List[str]:
'''simple docstring'''
return values.split(',' )
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
lowerCAmelCase = parser.parse_args()
lowerCAmelCase = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
lowerCAmelCase = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
lowerCAmelCase = extract_warnings(args.output_dir, args.targets)
lowerCAmelCase = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
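# Hedged usage note (the script name, run id and token are placeholders):
#
#   python extract_warnings.py --workflow_run_id 12345 \
#       --output_dir ./warnings --token <GITHUB_TOKEN> \
#       --targets DeprecationWarning,UserWarning
#
# With --from_gh the artifacts are expected to be pre-downloaded by
# actions/download-artifact@v3 and only their warnings.txt files are parsed.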
| 230 | 1 |
from __future__ import annotations
import queue
class _UpperCamelCase :
"""simple docstring"""
def __init__( self , a__ ) -> Dict:
A = data
A = None
A = None
def _lowerCAmelCase ( ) -> TreeNode:
"""simple docstring"""
print("""\n********Press N to stop entering at any point of time********\n""" )
A = input("""Enter the value of the root node: """ ).strip().lower()
A = queue.Queue()
A = TreeNode(int(UpperCamelCase__ ) )
q.put(UpperCamelCase__ )
while not q.empty():
A = q.get()
A = f'Enter the left node of {node_found.data}: '
A = input(UpperCamelCase__ ).strip().lower() or """n"""
if check == "n":
return tree_node
A = TreeNode(int(UpperCamelCase__ ) )
A = left_node
q.put(UpperCamelCase__ )
A = f'Enter the right node of {node_found.data}: '
A = input(UpperCamelCase__ ).strip().lower() or """n"""
if check == "n":
return tree_node
A = TreeNode(int(UpperCamelCase__ ) )
A = right_node
q.put(UpperCamelCase__ )
raise
def _lowerCAmelCase ( UpperCamelCase__: Dict ) -> None:
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def _lowerCAmelCase ( UpperCamelCase__: Optional[int] ) -> None:
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def _lowerCAmelCase ( UpperCamelCase__: Union[str, Any] ) -> None:
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def _lowerCAmelCase ( UpperCamelCase__: Dict ) -> None:
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not node:
return
A = queue.Queue()
q.put(UpperCamelCase__ )
while not q.empty():
A = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def _lowerCAmelCase ( UpperCamelCase__: List[str] ) -> None:
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not node:
return
A = queue.Queue()
q.put(UpperCamelCase__ )
while not q.empty():
A = []
while not q.empty():
A = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(UpperCamelCase__ )
def _lowerCAmelCase ( UpperCamelCase__: Dict ) -> None:
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not node:
return
A = []
A = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(UpperCamelCase__ )
A = n.left
# end of while means current node doesn't have left child
A = stack.pop()
# start to traverse its right child
A = n.right
def _lowerCAmelCase ( UpperCamelCase__: List[str] ) -> None:
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not node:
return
A = []
A = node
while n or stack:
while n:
stack.append(UpperCamelCase__ )
A = n.left
A = stack.pop()
print(n.data , end=""",""" )
A = n.right
def _lowerCAmelCase ( UpperCamelCase__: Dict ) -> None:
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not node:
return
A , A = [], []
A = node
stacka.append(UpperCamelCase__ )
while stacka: # to find the reversed order of post order, store it in stack2
A = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(UpperCamelCase__ )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=""",""" )
def _lowerCAmelCase ( UpperCamelCase__: Union[str, Any] = "" , UpperCamelCase__: Optional[int]=50 , UpperCamelCase__: str="*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
A , A = divmod(width - len(UpperCamelCase__ ) - 2 , 2 )
return f'{left * char} {s} {(left + extra) * char}'
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
    node: TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt()) | 720 |
from __future__ import annotations
from typing import Any
class Matrix:
    """A small dense 2-D matrix with just enough operations for Sherman-Morrison."""

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]
    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        """Return True when `loc` is a valid (row, column) pair for this matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
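# `sherman_morrison` above applies the Sherman-Morrison identity with `self`
# playing the role of A^(-1):
#     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# This turns a rank-1 update of A into O(n^2) work instead of a fresh O(n^3)
# inversion; `numerator_factor` is the scalar 1 + v^T A^(-1) u.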
# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test2()
| 546 | 0 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
from .vae_flax import FlaxAutoencoderKL
| 42 |
"""simple docstring"""
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Rewrite `x` so that every sentence sits on its own line (used for ROUGE-Lsum)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
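# Example (assuming nltk's punkt data was downloaded above):
# add_newline_to_end_of_each_sentence("Hello world. How are you?")
# returns "Hello world.\nHow are you?".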
| 426 | 0 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Return the Manhattan (L1) distance between two points in n-dimensional space."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('Both points must be in the same n-dimensional space')
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    """Raise if `point` is not a non-empty list of numbers."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        'Expected a list of numbers as input, found '
                        f'{type(item).__name__}'
                    )
                    raise TypeError(msg)
        else:
            msg = f'Expected a list of numbers as input, found {type(point).__name__}'
            raise TypeError(msg)
    else:
        raise ValueError('Missing an input')


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """One-liner variant of `manhattan_distance` with identical validation."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('Both points must be in the same n-dimensional space')
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
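# For example, manhattan_distance([1, 1], [2, 2]) == 2.0, and both functions
# raise ValueError when the two points have different dimensionality.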
if __name__ == "__main__":
import doctest
doctest.testmod()
| 604 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
@slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }
        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
# compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 604 | 1 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, act_dim=6, state_dim=17, hidden_size=23, max_length=11, is_training=True):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config(self):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use input_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """
        An integration test that performs autoregressive prediction of state, action and return
        from a sequence of state, actions and returns. Performed over two timesteps.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )
        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_preds = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )
            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
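# The loop above mirrors offline-RL evaluation with a Decision Transformer:
# each step appends a zero placeholder action, asks the model for the next
# action, then feeds back the (here stubbed/random) environment state and the
# return-to-go decremented by the received reward.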
| 515 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)
    @slow
    def test_prefix_input_sequence_construction(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        text_1 = tokenizer.decode(tokens_1)
        text_2 = tokenizer.decode(tokens_2)
        text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(text_1, expected_text)
        self.assertEqual(text_2, expected_text)
        self.assertEqual(text_3, expected_text)
    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)
    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_2[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_3[1], x_token_3[3])  # SEG token
    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
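# Note: `prefix_text` marks everything before the SEG token as a prefix
# segment (token_type_id 1) that the prefix-LM attends to bidirectionally;
# the token_type_ids and SEG-token assertions above exercise exactly that.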
| 225 | 0 |
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedily print a maximum-size set of mutually compatible activities.

    Assumes the activities are sorted by finish time.
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
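# Worked example with the sample data used below: for start = [1, 3, 0, 5, 8, 5]
# and finish = [2, 4, 6, 7, 9, 9] the greedy scan keeps activity 0 (finish 2),
# then 1 (start 3 >= 2), 3 (start 5 >= 4) and 4 (start 8 >= 7), printing 0,1,3,4.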
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 | 0 |
def solution(length: int = 50) -> int:
    """Count the fillings of a row of `length` units with black unit squares and
    red blocks of length at least 3, where blocks are separated by at least one
    black square."""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
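# How the DP above counts: ways_number[n] starts at 1 (the all-black row);
# every block of length >= 3 placed at block_start contributes the count for
# the shorter remaining row, where the extra "- 1" accounts for the single
# black separator square that must follow the block.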
if __name__ == "__main__":
print(F'''{solution() = }''')
| 402 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
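# Example invocation (assuming this script is saved as
# convert_dpt_hybrid_to_pytorch.py; the dump path is a placeholder):
#
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large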
| 402 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"
    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
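# A minimal usage sketch (hypothetical values): linear RoPE scaling with a
# factor of 2.0 passes the validation above.
#
#     config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})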
| 166 |
def apply_table(inp: str, table: list[int]) -> str:
    """Apply the given permutation table to the input bit-string."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data: str) -> str:
    """Rotate the bit-string one position to the left."""
    return data[1:] + data[0]


def xor(a: str, b: str) -> str:
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s: list[list[int]], data: str) -> str:
    """Look up `data` in S-box `s`: outer bits select the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion: list[int], s0: list[list[int]], s1: list[list[int]], key: str, message: str) -> str:
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
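# `function` above is one Feistel round of simplified DES: the right half is
# expanded/permuted, XORed with the round key, pushed through S-boxes s0/s1,
# permuted by p4_table and finally XORed into the left half; the callers below
# swap the halves between the two rounds.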
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 166 | 1 |