"""simple docstring"""
def _a ( UpperCAmelCase__ ) -> int:
__SCREAMING_SNAKE_CASE = [1]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0, 0, 0
__SCREAMING_SNAKE_CASE = ugly_nums[ia] * 2
__SCREAMING_SNAKE_CASE = ugly_nums[ia] * 3
__SCREAMING_SNAKE_CASE = ugly_nums[ia] * 5
for _ in range(1 , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = min(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
ugly_nums.append(UpperCAmelCase__ )
if next_num == next_a:
ia += 1
__SCREAMING_SNAKE_CASE = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
__SCREAMING_SNAKE_CASE = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
__SCREAMING_SNAKE_CASE = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F'''{ugly_numbers(200) = }''')
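# Complexity note (not part of the original file): each of the n iterations above does
# O(1) work, so the function runs in O(n) time and O(n) space, versus testing every
# integer for 2/3/5-smoothness. A quick sanity check, assuming `ugly_numbers` is in scope:
#
#     assert [ugly_numbers(k) for k in range(1, 8)] == [1, 2, 3, 4, 5, 6, 8]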
"""simple docstring"""
def _a ( UpperCAmelCase__ ) -> List[str]:
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ )
for i in range(n - 1 ):
for j in range(i + 1 , UpperCAmelCase__ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def _a ( UpperCAmelCase__ ) -> int:
if len(UpperCAmelCase__ ) <= 1:
return arr, 0
__SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ ) // 2
__SCREAMING_SNAKE_CASE = arr[0:mid]
__SCREAMING_SNAKE_CASE = arr[mid:]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = count_inversions_recursive(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = count_inversions_recursive(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = _count_cross_inversions(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> List[Any]:
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = __SCREAMING_SNAKE_CASE = __SCREAMING_SNAKE_CASE = 0
while i < len(UpperCAmelCase__ ) and j < len(UpperCAmelCase__ ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(UpperCAmelCase__ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(UpperCAmelCase__ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def _a ( ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
__SCREAMING_SNAKE_CASE = count_inversions_bf(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = count_inversions_recursive(UpperCAmelCase__ )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''' , UpperCAmelCase__ )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
__SCREAMING_SNAKE_CASE = count_inversions_bf(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = count_inversions_recursive(UpperCAmelCase__ )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , UpperCAmelCase__ )
# an empty list should also have zero inversions
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = count_inversions_bf(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = count_inversions_recursive(UpperCAmelCase__ )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , UpperCAmelCase__ )
if __name__ == "__main__":
main()
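# A small illustration (not part of the original file): a strictly decreasing array of n
# distinct values has every pair inverted, i.e. n * (n - 1) / 2 inversions. Assuming the
# functions above are in scope:
#
#     _, count = count_inversions_recursive(list(range(100, 0, -1)))
#     assert count == 100 * 99 // 2 == count_inversions_bf(list(range(100, 0, -1)))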
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class lowerCAmelCase :
"""simple docstring"""
def __init__( self , UpperCamelCase__ , UpperCamelCase__=3 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
def _lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=UpperCamelCase__ , )
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> str:
'''simple docstring'''
lowerCamelCase_ = FalconModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
lowerCamelCase_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> int:
'''simple docstring'''
lowerCamelCase_ = True
lowerCamelCase_ = FalconModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )
lowerCamelCase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )
lowerCamelCase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> Any:
'''simple docstring'''
lowerCamelCase_ = FalconForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = FalconForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# first forward pass
lowerCamelCase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ , )
lowerCamelCase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCamelCase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )['''hidden_states'''][0]
lowerCamelCase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )['''hidden_states'''][0]
# select random slice
lowerCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
def _lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.prepare_config_and_inputs()
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = config_and_inputs
lowerCamelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( a , a , a , unittest.TestCase ):
"""simple docstring"""
__lowercase :str = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowercase :Dict = (FalconForCausalLM,) if is_torch_available() else ()
__lowercase :Any = (
{
"feature-extraction": FalconModel,
"text-classification": FalconForSequenceClassification,
"text-generation": FalconForCausalLM,
"question-answering": FalconForQuestionAnswering,
"token-classification": FalconForTokenClassification,
"zero-shot": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase :List[str] = False
__lowercase :Dict = False
def _lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = FalconModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ , *lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
lowerCamelCase_ = alibi
self.model_tester.create_and_check_model(UpperCamelCase__ , *UpperCamelCase__ )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = input_dict['''input_ids''']
lowerCamelCase_ = input_ids.ne(1 ).to(UpperCamelCase__ )
lowerCamelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase_ = FalconForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = '''single_label_classification'''
lowerCamelCase_ = input_dict['''input_ids''']
lowerCamelCase_ = input_ids.ne(1 ).to(UpperCamelCase__ )
lowerCamelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase_ = FalconForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCAmelCase ( self ) -> int:
'''simple docstring'''
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = input_dict['''input_ids''']
lowerCamelCase_ = FalconForCausalLM(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase_ = model(UpperCamelCase__ , use_cache=UpperCamelCase__ )
lowerCamelCase_ = input_ids.shape[0]
lowerCamelCase_ = model._convert_to_rw_cache(result.past_key_values )
lowerCamelCase_ = model._convert_cache_to_standard_format(UpperCamelCase__ , UpperCamelCase__ )
for layer in range(len(UpperCamelCase__ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def _lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = '''multi_label_classification'''
lowerCamelCase_ = input_dict['''input_ids''']
lowerCamelCase_ = input_ids.ne(1 ).to(UpperCamelCase__ )
lowerCamelCase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCamelCase_ = FalconForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
for model_class in self.all_generative_model_classes:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(UpperCamelCase__ , '''use_cache''' ):
return
lowerCamelCase_ = model_class(UpperCamelCase__ ).to(UpperCamelCase__ )
if "use_cache" not in inputs:
lowerCamelCase_ = True
lowerCamelCase_ = model(**UpperCamelCase__ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
lowerCamelCase_ = (
getattr(UpperCamelCase__ , '''decoder_layers''' , UpperCamelCase__ )
or getattr(UpperCamelCase__ , '''num_decoder_layers''' , UpperCamelCase__ )
or config.num_hidden_layers
)
lowerCamelCase_ = getattr(UpperCamelCase__ , '''num_kv_heads''' , config.num_attention_heads )
lowerCamelCase_ = getattr(UpperCamelCase__ , '''d_model''' , config.hidden_size )
lowerCamelCase_ = embed_dim // num_attention_heads
lowerCamelCase_ = outputs['''past_key_values''']
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = inputs['''input_ids'''].shape
for i in range(UpperCamelCase__ ):
if config.new_decoder_architecture:
lowerCamelCase_ = config.num_attention_heads
elif config.multi_query:
lowerCamelCase_ = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
lowerCamelCase_ = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(UpperCamelCase__ )
lowerCamelCase_ = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(UpperCamelCase__ )
lowerCamelCase_ = (
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
lowerCamelCase_ = model.generate(**UpperCamelCase__ , do_sample=UpperCamelCase__ , max_new_tokens=19 )
lowerCamelCase_ = tokenizer.batch_decode(UpperCamelCase__ )[0]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def _lowerCAmelCase ( self ) -> int:
'''simple docstring'''
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
lowerCamelCase_ = FalconForCausalLM.from_pretrained(UpperCamelCase__ )
model.eval()
model.to(UpperCamelCase__ )
lowerCamelCase_ = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(UpperCamelCase__ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**UpperCamelCase__ , do_sample=UpperCamelCase__ , max_new_tokens=4 )
model.generate(**UpperCamelCase__ , do_sample=UpperCamelCase__ , max_new_tokens=4 )
model.generate(**UpperCamelCase__ , num_beams=2 , max_new_tokens=4 )
@slow
def _lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
lowerCamelCase_ = FalconForCausalLM.from_pretrained(UpperCamelCase__ )
model.eval()
model.to(device=UpperCamelCase__ )
lowerCamelCase_ = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(UpperCamelCase__ )
# Test results are the same with and without cache
lowerCamelCase_ = model.generate(**UpperCamelCase__ , do_sample=UpperCamelCase__ , max_new_tokens=20 , use_cache=UpperCamelCase__ )
lowerCamelCase_ = model.generate(**UpperCamelCase__ , do_sample=UpperCamelCase__ , max_new_tokens=20 , use_cache=UpperCamelCase__ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 ) | 66 |
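# How to run (a sketch, assuming the standard transformers test layout; @slow tests are
# skipped unless RUN_SLOW is set):
#
#     python -m pytest tests/models/falcon/test_modeling_falcon.py
#     RUN_SLOW=1 python -m pytest tests/models/falcon/test_modeling_falcon.py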
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowerCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None ) -> List[Any]:
'''simple docstring'''
super().__init__()
lowerCamelCase_ = pad_token_id
lowerCamelCase_ = max_length
lowerCamelCase_ = vocab
lowerCamelCase_ = merges
lowerCamelCase_ = BytePairTokenizer(UpperCamelCase__ , UpperCamelCase__ , sequence_length=UpperCamelCase__ )
@classmethod
def _lowerCAmelCase ( cls , UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = [''' '''.join(UpperCamelCase__ ) for m in tokenizer.bpe_ranks.keys()]
lowerCamelCase_ = tokenizer.get_vocab()
return cls(UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
@classmethod
def _lowerCAmelCase ( cls , UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
lowerCamelCase_ = GPTaTokenizer.from_pretrained(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
return cls.from_tokenizer(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
@classmethod
def _lowerCAmelCase ( cls , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
return cls(**UpperCamelCase__ )
def _lowerCAmelCase ( self ) -> int:
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.tf_tokenizer(UpperCamelCase__ )
lowerCamelCase_ = tf.ones_like(UpperCamelCase__ )
if self.pad_token_id is not None:
# pad the tokens up to max length
lowerCamelCase_ = max_length if max_length is not None else self.max_length
if max_length is not None:
lowerCamelCase_ , lowerCamelCase_ = pad_model_inputs(
UpperCamelCase__ , max_seq_length=UpperCamelCase__ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids} | 66 | 1 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path

from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available


if is_torch_available():
    import torch

if is_soundfile_availble():
    import soundfile as sf

if is_vision_available():
    from PIL import Image


def get_new_path(suffix="") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)


@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)
        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)


@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))


class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
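# Usage sketch (not from the original file): the agent types wrap tool outputs so they
# behave both like their raw payload and like a serializable artifact, e.g.:
#
#     text = AgentText("Hey!")
#     text.to_string()  # "Hey!" -- AgentText also compares equal to the plain str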
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
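# Usage sketch (not from the original file):
#
#     config = DonutSwinConfig()  # donut-base style defaults
#     config.hidden_size          # 768 == int(96 * 2 ** 3), the channel width of the last stage
#
# The derived `hidden_size` is what lets a DonutSwin encoder plug into
# VisionEncoderDecoderModel, which reads that attribute from the encoder config.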
import unittest

import numpy as np

from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.albert.modeling_flax_albert import (
        FlaxAlbertForMaskedLM,
        FlaxAlbertForMultipleChoice,
        FlaxAlbertForPreTraining,
        FlaxAlbertForQuestionAnswering,
        FlaxAlbertForSequenceClassification,
        FlaxAlbertForTokenClassification,
        FlaxAlbertModel,
    )


class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
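# How to run (a sketch, assuming the standard transformers test layout; the @slow tests
# download albert-base-v2 from the Hub):
#
#     RUN_SLOW=1 python -m pytest tests/models/albert/test_modeling_flax_albert.py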
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union

import torch

from ..utils import BaseOutput


SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    """Base mixin that gives schedulers config save/load and compatibility lookup."""

    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Dict[str, Any] = None, subfolder: Optional[str] = None, return_unused_kwargs=False, **kwargs):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
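# Usage sketch (not from the original file; any concrete scheduler, e.g. DDIMScheduler,
# inherits this mixin):
#
#     from diffusers import DDIMScheduler
#     scheduler = DDIMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")
#     scheduler.compatibles  # other scheduler classes that can be swapped in with this config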
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
"""simple docstring"""
lowerCAmelCase__ :int = BertConfig.from_json_file(_SCREAMING_SNAKE_CASE )
print(F"Building PyTorch model from configuration: {config}" )
lowerCAmelCase__ :Union[str, Any] = BertForPreTraining(_SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_bert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
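# Example command line (paths are placeholders):
#
#     python convert_bert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./bert_model.ckpt \
#         --bert_config_file ./bert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin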
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list:
    """
    Find a shortest path between `start` and `goal` by breadth-first search.

    >>> bfs_shortest_path(demo_graph, "G", "D")
    ['G', 'C', 'A', 'B', 'D']
    """
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """
    Find the shortest-path distance (number of edges) between `start` and `target`.
    Returns -1 when either node is missing or unreachable.

    >>> bfs_shortest_path_distance(demo_graph, "G", "D")
    4
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
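# Complexity note (not part of the original file): both functions visit each vertex and
# edge at most once, so they run in O(V + E) time. Using collections.deque instead of
# list.pop(0) would make each dequeue O(1) instead of O(n).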
import copy
from typing import Dict, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig


MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(self, fpn_feature_size: int = 256, mask_feature_size: int = 256, no_object_weight: float = 0.1, use_auxiliary_loss: bool = False, backbone_config: Optional[Dict] = None, decoder_config: Optional[Dict] = None, init_std: float = 0.02, init_xavier_std: float = 1.0, dice_weight: float = 1.0, cross_entropy_weight: float = 1.0, mask_weight: float = 20.0, output_auxiliary_logits: Optional[bool] = None, **kwargs):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3, out_features=["stage1", "stage2", "stage3", "stage4"]
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
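# Usage sketch (not from the original file):
#
#     config = MaskFormerConfig()  # Swin-base backbone + DETR decoder defaults
#     custom = MaskFormerConfig.from_backbone_and_decoder_configs(
#         backbone_config=SwinConfig(), decoder_config=DetrConfig()
#     )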
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    # ViT-hybrid (base) settings; the upstream script's condition here was always
    # truthy, so these are applied to every checkpoint to preserve its behavior
    config.hidden_size = 768
    config.reassemble_factors = [1, 1, 1, 0.5]
    config.neck_hidden_sizes = [256, 512, 768, 768]
    config.num_labels = 150
    config.patch_size = 16
    expected_shape = (1, 384, 384)
    config.use_batch_norm_in_fusion_residual = False
    config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name


def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    """
    Copy/paste/tweak the original model's weights into our DPT structure.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )

        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    parser.add_argument(
        "--show_prediction",
        action="store_true",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
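# Example invocation (a sketch; despite the flag name, the script passes the value
# straight to torch.load, so point --checkpoint_url at a downloaded local file --
# the checkpoint filename below is illustrative):
#
#     python convert_dpt_hybrid_to_pytorch.py \
#         --checkpoint_url ./dpt_hybrid-midas-501f0c75.pt \
#         --pytorch_dump_folder_path ./dpt-hybrid-midas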
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def __a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ):
_lowercase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy' )
_lowercase : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_lowercase : Optional[int] = 'A red cartoon frog, 4k'
_lowercase : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCAmelCase )
_lowercase : Tuple = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa )
_lowercase : Tuple = pipeline.to(_lowerCAmelCase )
pipeline.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : Tuple = torch.Generator(device='cpu' ).manual_seed(0 )
_lowercase , _lowercase : Any = pipe_prior(
_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_lowercase : Any = pipeline(
_lowerCAmelCase , image=_lowerCAmelCase , image_embeds=_lowerCAmelCase , negative_image_embeds=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type='np' , )
_lowercase : Union[str, Any] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
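
# Illustrative sketch (not executed by the tests): the two-stage flow the slow
# test above exercises, written with the public diffusers names (e.g.
# KandinskyPriorPipeline / KandinskyImg2ImgPipeline) that the machine-mangled
# identifiers in this file stand in for. Wrapped in a helper so importing this
# module never downloads a checkpoint.
def _example_kandinsky_img2img(init_image, prompt="A red cartoon frog, 4k"):
    import torch
    from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline

    prior = KandinskyPriorPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
    ).to("cuda")
    pipe = KandinskyImg2ImgPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
    ).to("cuda")
    image_embeds, negative_image_embeds = prior(prompt).to_tuple()
    return pipe(
        prompt,
        image=init_image,
        image_embeds=image_embeds,
        negative_image_embeds=negative_image_embeds,
        strength=0.2,
    ).images[0]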
| 66 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
_snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class a (_lowerCAmelCase ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCamelCase : CLIPSegForImageSegmentation , lowerCamelCase : CLIPSegProcessor , lowerCamelCase : AutoencoderKL , lowerCamelCase : CLIPTextModel , lowerCamelCase : CLIPTokenizer , lowerCamelCase : UNetaDConditionModel , lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase : StableDiffusionSafetyChecker , lowerCamelCase : CLIPImageProcessor , ) -> Tuple:
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
__snake_case : Tuple = (
F'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
F' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , lowerCamelCase , standard_warn=lowerCamelCase )
__snake_case : Any = dict(scheduler.config )
__snake_case : List[Any] = 1
__snake_case : Tuple = FrozenDict(lowerCamelCase )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
__snake_case : List[str] = (
F'The configuration file of this scheduler: {scheduler} has not set the configuration'
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , lowerCamelCase , standard_warn=lowerCamelCase )
__snake_case : List[str] = dict(scheduler.config )
__snake_case : List[str] = True
__snake_case : Any = FrozenDict(lowerCamelCase )
if safety_checker is None:
logger.warning(
F'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=lowerCamelCase , segmentation_processor=lowerCamelCase , vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase , safety_checker=lowerCamelCase , feature_extractor=lowerCamelCase , )
def __snake_case ( self : Dict , lowerCamelCase : Optional[Union[str, int]] = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__snake_case : Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def __snake_case ( self : List[Any] ) -> Any:
self.enable_attention_slicing(lowerCamelCase )
def __snake_case ( self : Optional[Any] ) -> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
__snake_case : Optional[int] = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase , lowerCamelCase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __snake_case ( self : int ) -> Any:
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : List[Any] , lowerCamelCase : Union[str, List[str]] , lowerCamelCase : Union[torch.FloatTensor, PIL.Image.Image] , lowerCamelCase : str , lowerCamelCase : int = 512 , lowerCamelCase : int = 512 , lowerCamelCase : int = 50 , lowerCamelCase : float = 7.5 , lowerCamelCase : Optional[Union[str, List[str]]] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase : int = 1 , **lowerCamelCase : Dict , ) -> List[str]:
__snake_case : Tuple = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
__snake_case : str = self.segmentation_model(**lowerCamelCase )
__snake_case : Dict = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
__snake_case : List[Any] = self.numpy_to_pil(lowerCamelCase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
__snake_case : Tuple = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowerCamelCase , image=lowerCamelCase , mask_image=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , )
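
# Usage sketch (not executed): how this community pipeline is typically
# assembled. The checkpoint names are illustrative defaults, not taken from
# this file; `text` selects the region to repaint and `prompt` describes its
# replacement.
def _example_text_inpainting(image, text="a glass", prompt="a cup of coffee"):
    from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

    clipseg_name = "CIDAS/clipseg-rd64-refined"  # assumed checkpoint
    pipe = DiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-inpainting",  # assumed checkpoint
        custom_pipeline="text_inpainting",
        segmentation_model=CLIPSegForImageSegmentation.from_pretrained(clipseg_name),
        segmentation_processor=CLIPSegProcessor.from_pretrained(clipseg_name),
    )
    return pipe(image=image, text=text, prompt=prompt).images[0]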
| 81 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key , default=False ):
    """Read a boolean flag from the environment, falling back to `default`."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"""If set, {key} must be yes or no.""" )
    return _value
a__ = parse_flag_from_env('''RUN_SLOW''', default=False)
a__ = parse_flag_from_env('''RUN_REMOTE''', default=False)
a__ = parse_flag_from_env('''RUN_LOCAL''', default=True)
a__ = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
a__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
a__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
a__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
a__ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
a__ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
a__ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
a__ = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def __UpperCAmelCase ( __a : Optional[Any] ) -> Dict:
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
_a : Dict = unittest.skip('''test requires faiss''' )(a_ )
return test_case
def __UpperCAmelCase ( __a : Any ) -> Optional[int]:
"""simple docstring"""
try:
import regex # noqa
except ImportError:
_a : int = unittest.skip('''test requires regex''' )(a_ )
return test_case
def __UpperCAmelCase ( __a : List[str] ) -> List[str]:
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
_a : Optional[int] = unittest.skip('''test requires elasticsearch''' )(a_ )
return test_case
def __UpperCAmelCase ( __a : int ) -> Tuple:
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
_a : List[str] = unittest.skip('''test requires sqlalchemy''' )(a_ )
return test_case
def __UpperCAmelCase ( __a : Optional[Any] ) -> int:
"""simple docstring"""
if not config.TORCH_AVAILABLE:
_a : List[Any] = unittest.skip('''test requires PyTorch''' )(a_ )
return test_case
def __UpperCAmelCase ( __a : int ) -> List[Any]:
"""simple docstring"""
if not config.TF_AVAILABLE:
_a : Any = unittest.skip('''test requires TensorFlow''' )(a_ )
return test_case
def __UpperCAmelCase ( __a : Dict ) -> Any:
"""simple docstring"""
if not config.JAX_AVAILABLE:
_a : List[str] = unittest.skip('''test requires JAX''' )(a_ )
return test_case
def __UpperCAmelCase ( __a : Optional[Any] ) -> Any:
"""simple docstring"""
if not config.PIL_AVAILABLE:
_a : List[Any] = unittest.skip('''test requires Pillow''' )(a_ )
return test_case
def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple:
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('''test requires transformers''' )(a_ )
else:
return test_case
def __UpperCAmelCase ( __a : Tuple ) -> Optional[int]:
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('''test requires tiktoken''' )(a_ )
else:
return test_case
def __UpperCAmelCase ( __a : Union[str, Any] ) -> int:
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('''test requires spacy''' )(a_ )
else:
return test_case
def __UpperCAmelCase ( __a : Tuple ) -> int:
"""simple docstring"""
def _require_spacy_model(__a : Optional[int] ):
try:
import spacy # noqa F401
spacy.load(a_ )
except ImportError:
return unittest.skip('''test requires spacy''' )(a_ )
except OSError:
return unittest.skip('''test requires spacy model \'{}\''''.format(a_ ) )(a_ )
else:
return test_case
return _require_spacy_model
def __UpperCAmelCase ( __a : str ) -> int:
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('''test requires pyspark''' )(a_ )
else:
return test_case
def __UpperCAmelCase ( __a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('''test requires joblibspark''' )(a_ )
else:
return test_case
def __UpperCAmelCase ( __a : Dict ) -> str:
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
_a : Union[str, Any] = unittest.skip('''test is slow''' )(a_ )
return test_case
def __UpperCAmelCase ( __a : str ) -> List[Any]:
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
_a : Optional[int] = unittest.skip('''test is local''' )(a_ )
return test_case
def __UpperCAmelCase ( __a : str ) -> str:
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
_a : int = unittest.skip('''test is packaged''' )(a_ )
return test_case
def __UpperCAmelCase ( __a : str ) -> Tuple:
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
_a : Optional[int] = unittest.skip('''test requires remote''' )(a_ )
return test_case
def __UpperCAmelCase ( *__a : int ) -> Tuple:
"""simple docstring"""
def decorate(cls : Any ):
for name, fn in cls.__dict__.items():
if callable(a_ ) and name.startswith('''test''' ):
for decorator in decorators:
_a : Optional[Any] = decorator(a_ )
setattr(cls ,a_ ,a_ )
return cls
return decorate
class UpperCAmelCase_ ( _UpperCAmelCase ):
"""simple docstring"""
pass
class UpperCAmelCase_ ( _UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = 0
UpperCAmelCase__ : Any = 1
UpperCAmelCase__ : Optional[Any] = 2
@contextmanager
def __UpperCAmelCase ( __a : List[str]=OfflineSimulationMode.CONNECTION_FAILS ,__a : List[Any]=1E-16 ) -> str:
"""simple docstring"""
_a : List[Any] = requests.Session().request
def timeout_request(__a : int ,__a : int ,__a : Dict ,**__a : Tuple ):
# Change the url to an invalid url so that the connection hangs
_a : Union[str, Any] = '''https://10.255.255.1'''
if kwargs.get('''timeout''' ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
_a : Dict = timeout
try:
return online_request(a_ ,a_ ,**a_ )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
_a : int = url
_a : Any = e.args[0]
_a : Optional[int] = (max_retry_error.args[0].replace('''10.255.255.1''' ,F"""OfflineMock[{url}]""" ),)
_a : Union[str, Any] = (max_retry_error,)
raise
def raise_connection_error(__a : Dict ,__a : Dict ,**__a : List[Any] ):
raise requests.ConnectionError('''Offline mode is enabled.''' ,request=a_ )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('''requests.Session.send''' ,a_ ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('''requests.Session.request''' ,a_ ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('''datasets.config.HF_DATASETS_OFFLINE''' ,a_ ):
yield
else:
raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def __UpperCAmelCase ( *__a : Optional[int] ,**__a : Any ) -> List[Any]:
"""simple docstring"""
_a : Union[str, Any] = str(Path().resolve() )
with tempfile.TemporaryDirectory(*a_ ,**a_ ) as tmp_dir:
try:
os.chdir(a_ )
yield
finally:
os.chdir(a_ )
@contextmanager
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
import gc
gc.collect()
_a : List[str] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
import gc
gc.collect()
_a : Tuple = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __UpperCAmelCase ( __a : Any ,__a : str ) -> List[str]:
"""simple docstring"""
return deepcopy(a_ ).integers(0 ,100 ,10 ).tolist() == deepcopy(a_ ).integers(0 ,100 ,10 ).tolist()
def __UpperCAmelCase ( __a : Tuple ) -> Tuple:
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(__a : Dict ,*__a : int ,**__a : Tuple ):
try:
return func(*a_ ,**a_ )
except HTTPError as err:
if str(a_ ).startswith('''500''' ) or str(a_ ).startswith('''502''' ):
pytest.xfail(str(a_ ) )
raise err
return decorator.decorator(_wrapper ,a_ )
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a , _a ) -> List[str]:
_a : Any = returncode
_a : int = stdout
_a : Tuple = stderr
async def __UpperCAmelCase ( __a : List[str] ,__a : Optional[int] ) -> Tuple:
"""simple docstring"""
while True:
_a : int = await stream.readline()
if line:
callback(a_ )
else:
break
async def __UpperCAmelCase ( __a : Optional[int] ,__a : Optional[Any]=None ,__a : int=None ,__a : int=None ,__a : List[str]=False ,__a : Optional[Any]=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print('''\nRunning: ''' ,''' '''.join(a_ ) )
_a : Dict = await asyncio.create_subprocess_exec(
cmd[0] ,*cmd[1:] ,stdin=a_ ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=a_ ,)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_a : str = []
_a : Tuple = []
def tee(__a : int ,__a : Tuple ,__a : Any ,__a : Optional[Any]="" ):
_a : Union[str, Any] = line.decode('''utf-8''' ).rstrip()
sink.append(a_ )
if not quiet:
print(a_ ,a_ ,file=a_ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout ,lambda __a : tee(a_ ,a_ ,sys.stdout ,label='''stdout:''' ) ),
_read_stream(p.stderr ,lambda __a : tee(a_ ,a_ ,sys.stderr ,label='''stderr:''' ) ),
] ,timeout=a_ ,)
return _RunOutput(await p.wait() ,a_ ,a_ )
def __UpperCAmelCase ( __a : Optional[Any] ,__a : Any=None ,__a : List[str]=None ,__a : Tuple=180 ,__a : List[Any]=False ,__a : str=True ) -> _RunOutput:
"""simple docstring"""
_a : int = asyncio.get_event_loop()
_a : Any = loop.run_until_complete(
_stream_subprocess(a_ ,env=a_ ,stdin=a_ ,timeout=a_ ,quiet=a_ ,echo=a_ ) )
_a : Optional[Any] = ''' '''.join(a_ )
if result.returncode > 0:
_a : List[Any] = '''\n'''.join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
_a : List[Any] = os.environ.get('''PYTEST_XDIST_WORKER''' ,'''gw0''' )
_a : Union[str, Any] = re.sub(R'''^gw''' ,'''''' ,a_ ,0 ,re.M )
return int(a_ )
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
_a : Any = 29_500
_a : str = pytest_xdist_worker_id()
return port + uniq_delta
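
# Usage sketch: how the offline-simulation context manager defined above is
# applied in a test. Its original name (presumably `offline`) was mangled in
# this file, so the call below is illustrative only.
#
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       with pytest.raises(requests.exceptions.ConnectionError):
#           requests.Session().request("GET", "https://huggingface.co")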
| 718 |
def solution( n : int = 2_000_000 ) -> int:
    """Return the sum of all primes below n (Project Euler 10), via a sieve."""
    primality_list = [0 for i in range(n + 1 )]  # 0 = presumed prime, 1 = composite
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 ,int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i ,n + 1 ,i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f'''{solution() = }''')
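
# Quick check (sketch): the primes below 10 are 2, 3, 5 and 7, so the sieve
# above should return 2 + 3 + 5 + 7 = 17 for n = 10.
assert solution(10) == 17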
| 578 | 0 |
from __future__ import annotations
import time
__lowercase : Optional[Any] = list[tuple[int, int]]
__lowercase : Tuple = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowercase : Optional[Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class _A :
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Tuple = pos_x
snake_case : List[str] = pos_y
snake_case : Optional[int] = (pos_y, pos_x)
snake_case : List[str] = goal_x
snake_case : str = goal_y
snake_case : int = parent
class _A :
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Union[str, Any] = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = [self.start]
snake_case : int = False
def snake_case_ ( self ):
'''simple docstring'''
while self.node_queue:
snake_case : Any = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
snake_case : List[Any] = True
return self.retrace_path(SCREAMING_SNAKE_CASE_ )
snake_case : Dict = self.get_successors(SCREAMING_SNAKE_CASE_ )
for node in successors:
self.node_queue.append(SCREAMING_SNAKE_CASE_ )
if not self.reached:
return [self.start.pos]
return None
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : int = []
for action in delta:
snake_case : Optional[Any] = parent.pos_x + action[1]
snake_case : Optional[Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,self.target.pos_y ,self.target.pos_x ,SCREAMING_SNAKE_CASE_ ) )
return successors
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : List[Any] = node
snake_case : List[str] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
snake_case : Any = current_node.parent
path.reverse()
return path
class _A :
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : int = BreadthFirstSearch(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : Any = BreadthFirstSearch(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : int = False
def snake_case_ ( self ):
'''simple docstring'''
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
snake_case : List[str] = self.fwd_bfs.node_queue.pop(0 )
snake_case : Union[str, Any] = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
snake_case : Optional[int] = True
return self.retrace_bidirectional_path(
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = current_bwd_node
snake_case : Any = current_fwd_node
snake_case : Optional[Any] = {
self.fwd_bfs: self.fwd_bfs.get_successors(SCREAMING_SNAKE_CASE_ ),
self.bwd_bfs: self.bwd_bfs.get_successors(SCREAMING_SNAKE_CASE_ ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(SCREAMING_SNAKE_CASE_ )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Dict = self.fwd_bfs.retrace_path(SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = self.bwd_bfs.retrace_path(SCREAMING_SNAKE_CASE_ )
bwd_path.pop()
bwd_path.reverse()
snake_case : Optional[Any] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
__lowercase : List[Any] = (0, 0)
__lowercase : Optional[int] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__lowercase : Optional[int] = time.time()
__lowercase : Optional[int] = BreadthFirstSearch(init, goal)
__lowercase : Any = bfs.search()
__lowercase : List[str] = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
__lowercase : List[str] = time.time()
__lowercase : Dict = BidirectionalBreadthFirstSearch(init, goal)
__lowercase : Union[str, Any] = bd_bfs.search()
__lowercase : Optional[Any] = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
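
# Back-of-envelope sketch of why the bidirectional variant tends to win: with
# branching factor b and solution depth d, plain BFS explores on the order of
# b**d states while two half-depth frontiers explore roughly 2 * b**(d / 2).
_b, _d = 3, 10
assert _b**_d == 59_049 and 2 * _b ** (_d // 2) == 486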
| 36 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = ["image_processor", "tokenizer"]
lowerCamelCase_ = "AutoImageProcessor"
lowerCamelCase_ = "AutoTokenizer"
def __init__( self :Optional[int] , __A :Optional[Any] , __A :Dict ) -> Dict:
"""simple docstring"""
super().__init__(__A , __A )
SCREAMING_SNAKE_CASE__ = self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ) -> BatchEncoding:
        """Tokenize ``text`` and/or preprocess ``images``; when both are given,
        the pixel values are merged into the text encoding."""
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
def _snake_case ( self :str , *__A :List[str] , **__A :List[str] ) -> List[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__A , **__A )
def _snake_case ( self :List[str] , *__A :Any , **__A :Any ) -> Tuple:
"""simple docstring"""
return self.tokenizer.decode(*__A , **__A )
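
    # Usage sketch: a combined processor like this one is normally obtained via
    # AutoProcessor and called with both modalities at once. The checkpoint
    # name below is a placeholder, not from this file.
    #
    #   from transformers import AutoProcessor
    #   processor = AutoProcessor.from_pretrained("some/vision-text-checkpoint")
    #   batch = processor(text="a photo of a cat", images=image, return_tensors="pt")
    #   # -> keys: input_ids, attention_mask, pixel_values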
@property
def _snake_case ( self :Dict ) -> List[Any]:
"""simple docstring"""
return ["input_ids", "attention_mask", "pixel_values"] | 6 | 0 |
from __future__ import annotations
import math
def minimax( depth : int , node_index : int , is_max : bool , scores : list[int] , height : float ) -> int:
    '''Return the optimal score reachable from `node_index` at `depth` in a full binary game tree.'''
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if len(scores ) == 0:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )


def main() -> None:
    '''Demo: print the optimal value for a fixed eight-leaf score tree.'''
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print('''Optimal value : ''' , end='''''' )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
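
# Worked example (sketch): for the 4-leaf tree [3, 5, 2, 9] the maximiser
# chooses between min(3, 5) and min(2, 9), so the optimal value is max(3, 2).
assert minimax(0, 0, True, [3, 5, 2, 9], math.log(4, 2)) == 3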
| 451 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ : int = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Tuple = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
lowercase__ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
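
# Minimal sketch of the lazy-import idea used above (a simplification, not the
# actual `_LazyModule` implementation): attribute access triggers the real
# import, so importing the package stays cheap even with torch-backed symbols.
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._attr_to_module = {
#               attr: module
#               for module, attrs in import_structure.items()
#               for attr in attrs
#           }
#
#       def __getattr__(self, attr):
#           module = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
#           return getattr(module, attr)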
| 451 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : str = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Tuple = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
a : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 633 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@require_torch
def A_ ( self : Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = pipeline(
task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused" )
SCREAMING_SNAKE_CASE__ : int = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : int = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = audio_classifier(_UpperCAmelCase, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(_UpperCAmelCase ), [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}], )
@unittest.skip("No models are available in TF" )
def A_ ( self : str ) -> Dict:
"""simple docstring"""
pass
@slow
@require_torch
def A_ ( self : str ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipeline(
task="zero-shot-audio-classification", model="laion/clap-htsat-unfused", )
# This is an audio of a dog
SCREAMING_SNAKE_CASE__ : List[str] = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = audio_classifier(_UpperCAmelCase, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(_UpperCAmelCase ), [
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
], )
SCREAMING_SNAKE_CASE__ : Any = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(_UpperCAmelCase ), [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5, )
SCREAMING_SNAKE_CASE__ : Any = audio_classifier(
[audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5 )
self.assertEqual(
nested_simplify(_UpperCAmelCase ), [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5, )
@unittest.skip("No models are available in TF" )
def A_ ( self : str ) -> List[str]:
"""simple docstring"""
pass
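
# Usage sketch (not executed): the pipeline under test, fed a synthetic clip
# instead of the ESC-50 recording used above.
def _example_zero_shot_audio():
    import numpy as np

    audio_classifier = pipeline(
        task="zero-shot-audio-classification", model="laion/clap-htsat-unfused"
    )
    audio = np.zeros(16_000, dtype=np.float32)  # one second of silence
    return audio_classifier(audio, candidate_labels=["speech", "silence"])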
| 663 | 0 |
'''simple docstring'''
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """Logistic sigmoid: 1 / (1 + e^(-z))."""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """Binary cross-entropy between predictions h and targets y."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    """Log-likelihood of the targets y under the current weights."""
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70_000):
    """Fit the weight vector theta by batch gradient descent with learning rate alpha."""
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f'''loss: {j} \t''')  # printing the loss after every 100 iterations
    return theta
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print('''theta: ''', theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='''black''')
    plt.legend()
    plt.show()
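
# Optional cross-check (sketch): scikit-learn's own LogisticRegression fitted
# on the same two-feature iris subset should find a similar boundary.
def _sklearn_cross_check():
    from sklearn.linear_model import LogisticRegression

    data = datasets.load_iris()
    features = data.data[:, :2]
    labels = (data.target != 0) * 1
    clf = LogisticRegression().fit(features, labels)
    return clf.coef_, clf.intercept_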
| 434 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
@staticmethod
@abstractmethod
def _lowerCamelCase ( __A : ArgumentParser ):
raise NotImplementedError()
@abstractmethod
def _lowerCamelCase ( self : int ):
raise NotImplementedError()
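
# Hypothetical concrete command (illustrative names, not from this file): the
# two mangled abstract methods above presumably correspond to
# `register_subcommand` and `run` in the original CLI base class.
#
#   class HelloCommand(BaseCLICommand):
#       @staticmethod
#       def register_subcommand(subparsers):
#           p = subparsers.add_parser("hello")
#           p.set_defaults(func=lambda args: HelloCommand())
#
#       def run(self):
#           print("hello")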
| 434 | 1 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_snake_case = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
_snake_case = 10
_snake_case = 256
def _UpperCamelCase ( snake_case__ ) -> Optional[MinHash]:
if len(_UpperCAmelCase ) < MIN_NUM_TOKENS:
return None
__UpperCAmelCase : int = MinHash(num_perm=_UpperCAmelCase )
for token in set(_UpperCAmelCase ):
min_hash.update(token.encode() )
return min_hash
def _UpperCamelCase ( snake_case__ ) -> Set[str]:
return {t for t in NON_ALPHA.split(_UpperCAmelCase ) if len(t.strip() ) > 0}
class _snake_case :
def __init__( self: Tuple , *,
__lowerCamelCase: Dict = 0.85 , ) -> Dict:
__UpperCAmelCase : List[str] = duplication_jaccard_threshold
__UpperCAmelCase : Optional[Any] = NUM_PERM
__UpperCAmelCase : Optional[int] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
__UpperCAmelCase : int = defaultdict(UpperCAmelCase__ )
def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[Any] , __lowerCamelCase: List[str] ) -> None:
__UpperCAmelCase : Optional[int] = self._index.query(UpperCAmelCase__ )
if code_key in self._index.keys:
print(f'''Duplicate key {code_key}''' )
return
self._index.insert(UpperCAmelCase__ , UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(UpperCAmelCase__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(UpperCAmelCase__ )
def _lowerCamelCase ( self: List[str] ) -> List[List[Dict]]:
__UpperCAmelCase : int = []
for base, duplicates in self._duplicate_clusters.items():
__UpperCAmelCase : Optional[Any] = [base] + list(UpperCAmelCase__ )
# reformat the cluster to be a list of dict
__UpperCAmelCase : str = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
duplicate_clusters.append(UpperCAmelCase__ )
return duplicate_clusters
def _lowerCamelCase ( self: Any , __lowerCamelCase: Tuple ) -> None:
__UpperCAmelCase : List[Any] = self.get_duplicate_clusters()
with open(UpperCAmelCase__ , "w" ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
def _UpperCamelCase ( snake_case__ ) -> str:
__UpperCAmelCase , __UpperCAmelCase : List[Any] = element
__UpperCAmelCase : int = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def _UpperCamelCase ( snake_case__ ) -> Optional[int]:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash, ThreadedIterator(_UpperCAmelCase, max_queue_size=1_0000 ), chunksize=100, ):
if data is not None:
yield data
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase : str = DuplicationIndex(duplication_jaccard_threshold=_UpperCAmelCase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_UpperCAmelCase ) ), max_queue_size=100 ) ):
di.add(_UpperCAmelCase, _UpperCAmelCase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def _UpperCamelCase ( snake_case__, snake_case__ ) -> float:
__UpperCAmelCase : List[Any] = get_tokens(_UpperCAmelCase )
__UpperCAmelCase : List[str] = get_tokens(_UpperCAmelCase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_snake_case = None
def _UpperCamelCase ( snake_case__, snake_case__ ) -> int:
__UpperCAmelCase : str = []
for elementa in cluster:
__UpperCAmelCase : Optional[int] = _shared_dataset[elementa["base_index"]]["content"]
for elementa in extremes:
__UpperCAmelCase : Dict = _shared_dataset[elementa["base_index"]]["content"]
if jaccard_similarity(_UpperCAmelCase, _UpperCAmelCase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
__UpperCAmelCase : List[str] = 1
extremes.append(_UpperCAmelCase )
return extremes
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> str:
global _shared_dataset
__UpperCAmelCase : int = dataset
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : Optional[int] = partial(_find_cluster_extremes_shared, jaccard_threshold=_UpperCAmelCase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_UpperCAmelCase, _UpperCAmelCase, ), total=len(_UpperCAmelCase ), ):
extremes_list.append(_UpperCAmelCase )
return extremes_list
def _UpperCamelCase ( snake_case__, snake_case__ = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
__UpperCAmelCase : Dict = make_duplicate_clusters(_UpperCAmelCase, _UpperCAmelCase )
__UpperCAmelCase : int = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
__UpperCAmelCase : Dict = {}
__UpperCAmelCase : Any = find_extremes(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
for extremes in extremes_clusters:
for element in extremes:
__UpperCAmelCase : Any = element
__UpperCAmelCase : Dict = duplicate_indices - set(extreme_dict.keys() )
__UpperCAmelCase : Tuple = dataset.filter(lambda snake_case__, snake_case__ : idx not in remove_indices, with_indices=_UpperCAmelCase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
__UpperCAmelCase : Optional[Any] = element["base_index"] in extreme_dict
if element["is_extreme"]:
__UpperCAmelCase : List[str] = extreme_dict[element["base_index"]]["copies"]
print(f'''Original dataset size: {len(_UpperCAmelCase )}''' )
print(f'''Number of duplicate clusters: {len(_UpperCAmelCase )}''' )
print(f'''Files in duplicate cluster: {len(_UpperCAmelCase )}''' )
print(f'''Unique files in duplicate cluster: {len(_UpperCAmelCase )}''' )
print(f'''Filtered dataset size: {len(_UpperCAmelCase )}''' )
return ds_filter, duplicate_clusters
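
# Minimal sketch of the datasketch primitives this module builds on: MinHash
# signatures estimate the Jaccard similarity of token sets, and MinHashLSH
# retrieves previously indexed signatures above a similarity threshold.
def _minhash_demo():
    def signature(tokens, num_perm=256):  # 256 matches the NUM_PERM used above
        min_hash = MinHash(num_perm=num_perm)
        for token in set(tokens):
            min_hash.update(token.encode())
        return min_hash

    sig_a = signature("def add ( a , b ) : return a + b".split())
    sig_b = signature("def add ( x , y ) : return x + y".split())
    lsh = MinHashLSH(threshold=0.5, num_perm=256)
    lsh.insert("snippet-a", sig_a)
    # Estimated Jaccard similarity, and the near-duplicates found for sig_b:
    return sig_a.jaccard(sig_b), lsh.query(sig_b)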
| 382 |
'''simple docstring'''
def solution( length : int = 5_0 ) -> int:
    """Count the ways to fill a row of `length` units with red blocks of
    minimum length three, separated by at least one grey square
    (Project Euler 114)."""
    ways_number = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
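
# Sanity check (sketch): Project Euler 114 states that a row of length 7
# admits exactly 17 arrangements, which the recurrence above reproduces.
assert solution(7) == 17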
| 697 | 0 |
from __future__ import annotations
import math
import random
from typing import Any
class __UpperCAmelCase :
"""simple docstring"""
def __init__( self ):
__a = []
__a = 0
__a = 0
def snake_case_ ( self ):
return self.head == self.tail
def snake_case_ ( self , __A ):
self.data.append(__A )
__a = self.tail + 1
def snake_case_ ( self ):
__a = self.data[self.head]
__a = self.head + 1
return ret
def snake_case_ ( self ):
return self.tail - self.head
def snake_case_ ( self ):
print(self.data )
print("""**************""" )
print(self.data[self.head : self.tail] )
class __UpperCAmelCase :
"""simple docstring"""
def __init__( self , __A ):
__a = data
__a = None
__a = None
__a = 1
def snake_case_ ( self ):
return self.data
def snake_case_ ( self ):
return self.left
def snake_case_ ( self ):
return self.right
def snake_case_ ( self ):
return self.height
def snake_case_ ( self , __A ):
__a = data
def snake_case_ ( self , __A ):
__a = node
def snake_case_ ( self , __A ):
__a = node
def snake_case_ ( self , __A ):
__a = height
def a (lowerCAmelCase__ ):
if node is None:
return 0
return node.get_height()
def a (lowerCAmelCase__ , lowerCAmelCase__ ):
if a > b:
return a
return b
def a (lowerCAmelCase__ ):
print("""left rotation node:""" , node.get_data() )
__a = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(lowerCAmelCase__ )
__a = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowerCAmelCase__ )
__a = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowerCAmelCase__ )
return ret
def a (lowerCAmelCase__ ):
print("""right rotation node:""" , node.get_data() )
__a = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(lowerCAmelCase__ )
__a = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowerCAmelCase__ )
__a = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowerCAmelCase__ )
return ret
def a (lowerCAmelCase__ ):
__a = node.get_left()
assert left_child is not None
node.set_left(left_rotation(lowerCAmelCase__ ) )
return right_rotation(lowerCAmelCase__ )
def a (lowerCAmelCase__ ):
__a = node.get_right()
assert right_child is not None
node.set_right(right_rotation(lowerCAmelCase__ ) )
return left_rotation(lowerCAmelCase__ )
def a (lowerCAmelCase__ , lowerCAmelCase__ ):
if node is None:
return MyNode(lowerCAmelCase__ )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , lowerCAmelCase__ ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
__a = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
__a = right_rotation(lowerCAmelCase__ )
else:
__a = lr_rotation(lowerCAmelCase__ )
else:
node.set_right(insert_node(node.get_right() , lowerCAmelCase__ ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
__a = node.get_right()
assert right_child is not None
if data < right_child.get_data():
__a = rl_rotation(lowerCAmelCase__ )
else:
__a = left_rotation(lowerCAmelCase__ )
__a = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowerCAmelCase__ )
return node
def a (lowerCAmelCase__ ):
while True:
__a = root.get_right()
if right_child is None:
break
__a = right_child
return root.get_data()
def a (lowerCAmelCase__ ):
while True:
__a = root.get_left()
if left_child is None:
break
__a = left_child
return root.get_data()
def a (lowerCAmelCase__ , lowerCAmelCase__ ):
__a = root.get_left()
__a = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
__a = get_left_most(lowerCAmelCase__ )
root.set_data(lowerCAmelCase__ )
root.set_right(del_node(lowerCAmelCase__ , lowerCAmelCase__ ) )
elif left_child is not None:
__a = left_child
elif right_child is not None:
__a = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("""No such data""" )
return root
else:
root.set_left(del_node(lowerCAmelCase__ , lowerCAmelCase__ ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(lowerCAmelCase__ , lowerCAmelCase__ ) )
if get_height(lowerCAmelCase__ ) - get_height(lowerCAmelCase__ ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
__a = left_rotation(lowerCAmelCase__ )
else:
__a = rl_rotation(lowerCAmelCase__ )
elif get_height(lowerCAmelCase__ ) - get_height(lowerCAmelCase__ ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
__a = right_rotation(lowerCAmelCase__ )
else:
__a = lr_rotation(lowerCAmelCase__ )
__a = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(lowerCAmelCase__ )
return root
class __UpperCAmelCase :
"""simple docstring"""
def __init__( self ):
__a = None
def snake_case_ ( self ):
return get_height(self.root )
def snake_case_ ( self , __A ):
print("""insert:""" + str(__A ) )
__a = insert_node(self.root , __A )
def snake_case_ ( self , __A ):
print("""delete:""" + str(__A ) )
if self.root is None:
print("""Tree is empty!""" )
return
__a = del_node(self.root , __A )
def __str__( self , ): # a level traversale, gives a more intuitive look on the tree
__a = """"""
__a = MyQueue()
q.push(self.root )
__a = self.get_height()
if layer == 0:
return output
__a = 0
while not q.is_empty():
__a = q.pop()
__a = """ """ * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(__A )
q.push(__A )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
__a = cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , __A ) - 1:
__a = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def a ():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
SCREAMING_SNAKE_CASE = AVLtree()
SCREAMING_SNAKE_CASE = list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
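
# Optional invariant check (sketch), written against the intended accessor
# names (get_left / get_right) that the mangled methods above stand in for:
# an AVL tree keeps every subtree's height difference within one.
def is_balanced(node) -> bool:
    def subtree_height(n) -> int:
        if n is None:
            return 0
        return 1 + max(subtree_height(n.get_left()), subtree_height(n.get_right()))

    if node is None:
        return True
    if abs(subtree_height(node.get_left()) - subtree_height(node.get_right())) > 1:
        return False
    return is_balanced(node.get_left()) and is_balanced(node.get_right())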
| 708 |
from math import pow
def backtrack( needed_sum , power , current_number , current_sum , solutions_count ):
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count
    i_to_n = int(pow(current_number , power ) )
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
    return current_sum, solutions_count


def solve( needed_sum , power ):
    if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
        raise ValueError(
            '''Invalid input\n'''
            '''needed_sum must be between 1 and 1000, power between 2 and 10.''' )
    return backtrack(needed_sum , power , 1 , 0 , 0 )[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
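
# Worked check (sketch): 13 = 2**2 + 3**2 is the only way to write 13 as a
# sum of powers of distinct natural numbers with exponent 2.
assert solve(13, 2) == 1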
| 209 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
__SCREAMING_SNAKE_CASE : List[Any] = "fnet"
def __init__( self , lowercase_=3_2_0_0_0 , lowercase_=7_6_8 , lowercase_=1_2 , lowercase_=3_0_7_2 , lowercase_="gelu_new" , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=4 , lowercase_=0.0_2 , lowercase_=1E-12 , lowercase_=False , lowercase_=5_1_2 , lowercase_=3 , lowercase_=1 , lowercase_=2 , **lowercase_ , ) -> int:
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
UpperCAmelCase = vocab_size
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = initializer_range
UpperCAmelCase = type_vocab_size
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = use_tpu_fourier_optimizations
UpperCAmelCase = tpu_short_seq_length
| 373 |
"""simple docstring"""
import math
def res(x: int , y: int ) -> float:
    """Compare x**y by its base-10 logarithm, avoiding huge intermediates."""
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x )
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError('This should never happen' )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
    prompt = '''Enter the base and the power separated by a comma: '''
    x1, y1 = map(int, input(prompt).split(''','''))
    x2, y2 = map(int, input(prompt).split(''','''))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print('''Largest number is''', x1, '''^''', y1)
    elif res2 > res1:
        print('''Largest number is''', x2, '''^''', y2)
    else:
        print('''Both are equal''')
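
# Worked example (sketch): deciding 2**100 vs 10**30 without materialising
# either power: 100 * log10(2) ~ 30.10 beats 30 * log10(10) == 30.
assert res(2, 100) > res(10, 30)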
| 373 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a MEGATRON_BERT model."""

    model_type = "megatron-bert"

    def __init__(self, vocab_size=29_056, hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4_096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
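# Hedged usage sketch with the reconstructed names:
# config = MegatronBertConfig(num_hidden_layers=2)
# print(config.hidden_size)  # -> 1024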
 | 635 |
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        } )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs, )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions, references=references, )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"""epoch {epoch}:""", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
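# Hedged invocation sketch (script file name assumed): 5-fold CV on CPU.
#   python cross_validation.py --num_folds 5 --cpu
# Per-fold test logits are summed and averaged before the final argmax, i.e.
# a simple soft-voting ensemble across folds.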
| 635 | 1 |
'''simple docstring'''
def kth_permutation(k, n):
    """
    Finds the k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time.
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
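# Hedged usage sketch with the reconstructed name: the 10th (0-indexed)
# lexicographic permutation of 0..3.
# print(kth_permutation(10, 4))  # -> [1, 3, 0, 2]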
| 41 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
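# Hedged sketch of the lazy-import pattern above: the package stays cheap to
# import, and the real submodule is only loaded on first attribute access.
# import transformers.models.blip as blip   # package path assumed
# processor_cls = blip.BlipProcessor        # processing_blip imported here, not earlier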
| 72 | 0 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class lowerCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self ):
        if self.framework == "pytorch":
            subprocess.run(
                f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="utf-8" , check=True , )
        assert hasattr(self , "env" )

    def create_estimator(self , instance_count=1 ):
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-single''' , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )

    def save_results_as_csv(self , job_name ):
        TrainingJobAnalytics(job_name ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )

    def test_glue(self ):
        # create estimator
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 99_9999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
        assert all(t <= self.results["eval_loss"] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , outfile )
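# Hedged note on the mechanism above: @parameterized_class materialises one
# copy of the TestCase per dict, so the same test body runs once per framework.
# A toy sketch of the same idea:
# @parameterized_class([{"framework": "pytorch"}, {"framework": "tensorflow"}])
# class ToyTest(unittest.TestCase):
#     def test_framework_set(self):
#         assert self.framework in ("pytorch", "tensorflow")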
| 700 |
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
_DESCRIPTION = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
_KWARGS_DESCRIPTION = '\nBLEURT score.\n\nArgs:\n    `predictions` (list of str): prediction/candidate sentences\n    `references` (list of str): reference sentences\n    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n    \'scores\': List of scores.\nExamples:\n\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> bleurt = datasets.load_metric("bleurt")\n    >>> results = bleurt.compute(predictions=predictions, references=references)\n    >>> print([round(v, 2) for v in results["scores"]])\n    [1.03, 1.04]\n'
CHECKPOINT_URLS = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
"""simple docstring"""
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/google-research/bleurt" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/bleurt"] , reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"] , )
    def _download_and_prepare(self , dl_manager ):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')." )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name ) )

    def _compute(self , predictions , references ):
        scores = self.scorer.score(references=references , candidates=predictions )
        return {"scores": scores}
| 112 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
snake_case__ : str = ConsistencyModelPipeline
snake_case__ : Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
snake_case__ : Dict = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
snake_case__ : Union[str, Any] = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
])
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet" , )
return unet
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet_class_cond" , )
return unet
def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : Any=False ) -> Dict:
if class_cond:
__SCREAMING_SNAKE_CASE = self.dummy_cond_unet
else:
__SCREAMING_SNAKE_CASE = self.dummy_uncond_unet
# Default to CM multistep sampler
__SCREAMING_SNAKE_CASE = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=80.0 , )
__SCREAMING_SNAKE_CASE = {
"unet": unet,
"scheduler": scheduler,
}
return components
def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any]=0 ) -> Union[str, Any]:
if str(UpperCAmelCase__ ).startswith("mps" ):
__SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = {
"batch_size": 1,
"num_inference_steps": None,
"timesteps": [2_2, 0],
"generator": generator,
"output_type": "np",
}
return inputs
def UpperCAmelCase_ ( self : str ) -> Any:
__SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = ConsistencyModelPipeline(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = pipe(**UpperCAmelCase__ ).images
assert image.shape == (1, 3_2, 3_2, 3)
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components(class_cond=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = ConsistencyModelPipeline(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = pipe(**UpperCAmelCase__ ).images
assert image.shape == (1, 3_2, 3_2, 3)
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = ConsistencyModelPipeline(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = pipe(**UpperCAmelCase__ ).images
assert image.shape == (1, 3_2, 3_2, 3)
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
__SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components(class_cond=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = ConsistencyModelPipeline(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = pipe(**UpperCAmelCase__ ).images
assert image.shape == (1, 3_2, 3_2, 3)
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
"""simple docstring"""
def UpperCAmelCase_ ( self : Any ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : int="cpu" , UpperCAmelCase__ : Optional[Any]=torch.floataa , UpperCAmelCase__ : Optional[Any]=(1, 3, 6_4, 6_4) ) -> Tuple:
__SCREAMING_SNAKE_CASE = torch.manual_seed(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = {
"num_inference_steps": None,
"timesteps": [2_2, 0],
"class_labels": 0,
"generator": generator,
"output_type": "np",
}
if get_fixed_latents:
__SCREAMING_SNAKE_CASE = self.get_fixed_latents(seed=UpperCAmelCase__ , device=UpperCAmelCase__ , dtype=UpperCAmelCase__ , shape=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = latents
return inputs
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : str=0 , UpperCAmelCase__ : str="cpu" , UpperCAmelCase__ : Dict=torch.floataa , UpperCAmelCase__ : Union[str, Any]=(1, 3, 6_4, 6_4) ) -> List[Any]:
if type(UpperCAmelCase__ ) == str:
__SCREAMING_SNAKE_CASE = torch.device(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
return latents
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
__SCREAMING_SNAKE_CASE = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=80.0 , )
__SCREAMING_SNAKE_CASE = ConsistencyModelPipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
pipe.to(torch_device=UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.get_inputs()
__SCREAMING_SNAKE_CASE = pipe(**UpperCAmelCase__ ).images
assert image.shape == (1, 6_4, 6_4, 3)
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = np.array([0.0_888, 0.0_881, 0.0_666, 0.0_479, 0.0_292, 0.0_195, 0.0_201, 0.0_163, 0.0_254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
__SCREAMING_SNAKE_CASE = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=80.0 , )
__SCREAMING_SNAKE_CASE = ConsistencyModelPipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
pipe.to(torch_device=UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.get_inputs()
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = pipe(**UpperCAmelCase__ ).images
assert image.shape == (1, 6_4, 6_4, 3)
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = np.array([0.0_340, 0.0_152, 0.0_063, 0.0_267, 0.0_221, 0.0_107, 0.0_416, 0.0_186, 0.0_217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
@require_torch_a
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
__SCREAMING_SNAKE_CASE = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=80.0 , )
__SCREAMING_SNAKE_CASE = ConsistencyModelPipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
pipe.to(torch_device=UpperCAmelCase__ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.get_inputs(get_fixed_latents=UpperCAmelCase__ , device=UpperCAmelCase__ )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=UpperCAmelCase__ , enable_math=UpperCAmelCase__ , enable_mem_efficient=UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = pipe(**UpperCAmelCase__ ).images
assert image.shape == (1, 6_4, 6_4, 3)
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = np.array([0.1_875, 0.1_428, 0.1_289, 0.2_151, 0.2_092, 0.1_477, 0.1_877, 0.1_641, 0.1_353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@require_torch_a
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
__SCREAMING_SNAKE_CASE = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=80.0 , )
__SCREAMING_SNAKE_CASE = ConsistencyModelPipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
pipe.to(torch_device=UpperCAmelCase__ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.get_inputs(get_fixed_latents=UpperCAmelCase__ , device=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=UpperCAmelCase__ , enable_math=UpperCAmelCase__ , enable_mem_efficient=UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = pipe(**UpperCAmelCase__ ).images
assert image.shape == (1, 6_4, 6_4, 3)
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = np.array([0.1_663, 0.1_948, 0.2_275, 0.1_680, 0.1_204, 0.1_245, 0.1_858, 0.1_338, 0.2_095] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
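# Hedged note on the recurring pattern above: each test compares only a fixed
# 3x3 corner slice of one channel against hard-coded golden values, e.g.
#   image_slice = image[0, -3:, -3:, -1]
#   assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
# which keeps the stored reference data tiny while still catching regressions.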
| 682 |
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt , class_data_dir , num_class_images ):
    '''simple docstring'''
    factor = 1.5
    num_images = int(factor * num_class_images )
    client = ClipClient(
        url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 )

    os.makedirs(f"""{class_data_dir}/images""" , exist_ok=True )
    if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1E4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 , )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images" , total=num_class_images )

    with open(f"""{class_data_dir}/caption.txt""" , "w" ) as fa, open(f"""{class_data_dir}/urls.txt""" , "w" ) as fa_urls, open(
        f"""{class_data_dir}/images.txt""" , "w" ) as fa_images:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"] )
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content ) )
                    with open(f"""{class_data_dir}/images/{total}.jpg""" , "wb" ) as f:
                        f.write(img.content )
                    fa.write(images["caption"] + "\n" )
                    fa_urls.write(images["url"] + "\n" )
                    fa_images.write(f"""{class_data_dir}/images/{total}.jpg""" + "\n" )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser("" , add_help=False )
    parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=True , type=str )
    parser.add_argument("--class_data_dir" , help="path to save images" , required=True , type=str )
    parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
a__ : Optional[Any] = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
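# Hedged invocation sketch (script file name and paths assumed):
#   python retrieve.py --class_prompt "cat" --class_data_dir ./real_reg/cat --num_class_images 200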
| 682 | 1 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 701 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__A : Dict = "http://www.mocksite.com/file1.txt"
__A : List[str] = "\"text\": [\"foo\", \"foo\"]"
__A : int = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class __snake_case :
"""simple docstring"""
lowercase = 2_00
lowercase = {'Content-Length': '100'}
lowercase = {}
def __lowercase ( self : Union[str, Any] , **lowerCamelCase : Optional[int] ) -> str:
return [bytes(lowerCamelCase , """utf-8""" )]
def UpperCamelCase_ ( *A__ : List[str] , **A__ : Union[str, Any] ):
'''simple docstring'''
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def UpperCamelCase_ ( A__ : List[Any] , A__ : List[Any] , A__ : str ):
'''simple docstring'''
import requests
monkeypatch.setattr(A__ , """request""" , A__ )
lowerCAmelCase_ : Tuple = URL
if issubclass(A__ , A__ ):
lowerCAmelCase_ : Optional[Any] = url
elif issubclass(A__ , A__ ):
lowerCAmelCase_ : Dict = [url]
elif issubclass(A__ , A__ ):
lowerCAmelCase_ : Tuple = {"""train""": url}
lowerCAmelCase_ : List[Any] = """dummy"""
lowerCAmelCase_ : str = """downloads"""
lowerCAmelCase_ : Dict = tmp_path
lowerCAmelCase_ : Any = DownloadConfig(
cache_dir=os.path.join(A__ , A__ ) , use_etag=A__ , )
lowerCAmelCase_ : List[Any] = DownloadManager(dataset_name=A__ , download_config=A__ )
lowerCAmelCase_ : int = dl_manager.download(A__ )
lowerCAmelCase_ : Any = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(A__ , A__ ):
lowerCAmelCase_ : str = [downloaded_paths]
lowerCAmelCase_ : Any = [urls]
elif isinstance(A__ , A__ ):
assert "train" in downloaded_paths.keys()
lowerCAmelCase_ : Union[str, Any] = downloaded_paths.values()
lowerCAmelCase_ : Optional[Any] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(A__ , A__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
lowerCAmelCase_ : Tuple = Path(A__ )
lowerCAmelCase_ : List[Any] = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
lowerCAmelCase_ : Optional[Any] = downloaded_path.read_text()
assert content == CONTENT
lowerCAmelCase_ : Tuple = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
lowerCAmelCase_ : Tuple = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def UpperCamelCase_ ( A__ : Union[str, Any] , A__ : List[Any] , A__ : List[str] ):
'''simple docstring'''
lowerCAmelCase_ : int = str(A__ )
if issubclass(A__ , A__ ):
lowerCAmelCase_ : int = filename
elif issubclass(A__ , A__ ):
lowerCAmelCase_ : List[str] = [filename]
elif issubclass(A__ , A__ ):
lowerCAmelCase_ : Union[str, Any] = {"""train""": filename}
lowerCAmelCase_ : Optional[int] = """dummy"""
lowerCAmelCase_ : str = xz_file.parent
lowerCAmelCase_ : List[str] = """extracted"""
lowerCAmelCase_ : Union[str, Any] = DownloadConfig(
cache_dir=A__ , use_etag=A__ , )
lowerCAmelCase_ : str = DownloadManager(dataset_name=A__ , download_config=A__ )
lowerCAmelCase_ : Union[str, Any] = dl_manager.extract(A__ )
lowerCAmelCase_ : List[Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(A__ , A__ ):
lowerCAmelCase_ : List[str] = [extracted_paths]
lowerCAmelCase_ : Union[str, Any] = [paths]
elif isinstance(A__ , A__ ):
assert "train" in extracted_paths.keys()
lowerCAmelCase_ : Union[str, Any] = extracted_paths.values()
lowerCAmelCase_ : int = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(A__ , A__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
lowerCAmelCase_ : int = Path(A__ )
lowerCAmelCase_ : Optional[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(A__ , etag=A__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
lowerCAmelCase_ : Any = extracted_path.read_text()
lowerCAmelCase_ : Optional[Any] = text_file.read_text()
assert extracted_file_content == expected_file_content
def UpperCamelCase_ ( A__ : Dict , A__ : Any ):
'''simple docstring'''
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(A__ , start=1 ):
lowerCAmelCase_ : int = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def UpperCamelCase_ ( A__ : Optional[Any] , A__ : List[Any] ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = request.getfixturevalue(A__ )
lowerCAmelCase_ : List[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(A__ ) , start=1 ):
_test_jsonl(A__ , A__ )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def UpperCamelCase_ ( A__ : str , A__ : int ):
'''simple docstring'''
lowerCAmelCase_ : Tuple = request.getfixturevalue(A__ )
lowerCAmelCase_ : str = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(A__ ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(A__ ) , start=1 ):
_test_jsonl(A__ , A__ )
assert num_tar == 1
assert num_jsonl == 2
def UpperCamelCase_ ( A__ : Tuple ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(A__ ) , start=1 ):
assert os.path.basename(A__ ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
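# Hedged sketch of the cache layout asserted above: the leaf file name is the
# deterministic hash of the URL, so repeat downloads of the same URL hit cache.
# assert hash_url_to_filename(URL, etag=None) == HASH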
| 398 | 0 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50_267, max_position_embeddings=10_24, encoder_layers=12, encoder_ffn_dim=40_96, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=40_96, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=10_24, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=1_00, prompt_mid_dim=8_00, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed." )
| 636 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class ImageClassification(TaskTemplate ):
    '''simple docstring'''

    task: str = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"image": Image()} )
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel} )
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self , features ):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features." )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self ) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
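# Hedged usage sketch with the reconstructed names:
# features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
# task = ImageClassification().align_with_features(features)
# print(task.label_schema["labels"].names)  # -> ['cat', 'dog']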
| 636 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader ):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : NestedDataStructureLike[PathLike] , __lowerCamelCase : Optional[NamedSplit] = None , __lowerCamelCase : Optional[Features] = None , __lowerCamelCase : str = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[int] = None , **__lowerCamelCase : Tuple , ) -> Union[str, Any]:
super().__init__(
__lowerCamelCase , split=__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = field
SCREAMING_SNAKE_CASE__ = path_or_paths if isinstance(__lowerCamelCase , __lowerCamelCase ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE__ = Json(
cache_dir=__lowerCamelCase , data_files=__lowerCamelCase , features=__lowerCamelCase , field=__lowerCamelCase , **__lowerCamelCase , )
def lowercase_ ( self : List[str] ) -> Dict:
# Build iterable dataset
if self.streaming:
SCREAMING_SNAKE_CASE__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
self.builder.download_and_prepare(
download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE__ = self.builder.as_dataset(
split=self.split , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
class JsonDatasetWriter:
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Dataset , __lowerCamelCase : Union[PathLike, BinaryIO] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , **__lowerCamelCase : Optional[int] , ) -> str:
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
SCREAMING_SNAKE_CASE__ = dataset
SCREAMING_SNAKE_CASE__ = path_or_buf
SCREAMING_SNAKE_CASE__ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
SCREAMING_SNAKE_CASE__ = num_proc
SCREAMING_SNAKE_CASE__ = '''utf-8'''
SCREAMING_SNAKE_CASE__ = to_json_kwargs
def lowercase_ ( self : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE__ = self.to_json_kwargs.pop('''path_or_buf''' , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.to_json_kwargs.pop('''orient''' , '''records''' )
SCREAMING_SNAKE_CASE__ = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
SCREAMING_SNAKE_CASE__ = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
SCREAMING_SNAKE_CASE__ = self.to_json_kwargs.pop('''compression''' , __lowerCamelCase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , '''wb''' , compression=__lowerCamelCase ) as buffer:
SCREAMING_SNAKE_CASE__ = self._write(file_obj=__lowerCamelCase , orient=__lowerCamelCase , lines=__lowerCamelCase , index=__lowerCamelCase , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
''' was passed. Please provide a local path instead.''' )
SCREAMING_SNAKE_CASE__ = self._write(
file_obj=self.path_or_buf , orient=__lowerCamelCase , lines=__lowerCamelCase , index=__lowerCamelCase , **self.to_json_kwargs )
return written
def lowercase_ ( self : Any , __lowerCamelCase : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = args
SCREAMING_SNAKE_CASE__ = query_table(
table=self.dataset.data , key=slice(__lowerCamelCase , offset + self.batch_size ) , indices=self.dataset._indices , )
SCREAMING_SNAKE_CASE__ = batch.to_pandas().to_json(
path_or_buf=__lowerCamelCase , orient=__lowerCamelCase , lines=__lowerCamelCase , index=__lowerCamelCase , **__lowerCamelCase )
if not json_str.endswith('''\n''' ):
json_str += "\n"
return json_str.encode(self.encoding )
def lowercase_ ( self : str , __lowerCamelCase : BinaryIO , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : str , **__lowerCamelCase : Any , ) -> int:
SCREAMING_SNAKE_CASE__ = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
SCREAMING_SNAKE_CASE__ = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , __lowerCamelCase , __lowerCamelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(__lowerCamelCase )
return written
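# Hedged usage sketch of the reader/writer pair; the method names (`read`,
# `write`) follow the upstream `datasets.io.json` API rather than the
# obfuscated ones shown above.
# ds = JsonDatasetReader("data.jsonl", cache_dir="cache").read()
# JsonDatasetWriter(ds, "out.jsonl.gz", compression="gzip").write()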
| 706 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Compute the circular convolution of two discrete signals."""

    def __init__(self ) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self ) -> list[float]:
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )

        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
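# Hedged usage sketch: circularly convolving [2, 1, 2, -1] with [1, 2, 3, 4].
# print(CircularConvolution().circular_convolution())  # -> [10, 10, 6, 14]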
| 472 | 0 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n    predictions: list of predictions to score. Each prediction\n        is a string that contains natural language and LaTex.\n    references: list of reference for each prediction. Each\n        reference is a string that contains natural language\n        and LaTex.\nReturns:\n    accuracy: accuracy after canonicalizing inputs\n        (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n    >>> metric = datasets.load_metric("competition_math")\n    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) ,homepage="""https://github.com/hendrycks/math""" ,codebase_urls=["""https://github.com/hendrycks/math"""] ,)
    def _compute( self , predictions , references ):
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(predictions )
        return {
            "accuracy": accuracy,
        }
 | 334 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self ):
        """simple docstring"""
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self , **kwargs ):
        """simple docstring"""
        return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )

    def tearDown(self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )

    def test_save_load_pretrained_default(self ):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )

    @slow
    def test_save_load_pretrained_additional_features(self ):
        """simple docstring"""
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )

    def test_speaker_embeddings(self ):
        """simple docstring"""
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len ),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string , voice_preset=voice_preset )
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname , "file.npz" )
        np.savez(tmpfilename , **voice_preset )
        inputs = processor(text=self.input_string , voice_preset=tmpfilename )
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string , voice_preset=self.voice_preset )

    def test_tokenizer(self ):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        encoded_tok = tokenizer(
            self.input_string , padding="max_length" , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 15 | 0 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()
results = {}
# fmt: off
lowerCAmelCase__ : int = torch.tensor([
-0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67,
1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89,
-1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39,
0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57
])
lowerCAmelCase__ : Dict = torch.tensor([
-2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36,
1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08,
-2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48,
2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65
])
lowerCAmelCase__ : Dict = torch.tensor([
-0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69,
-0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04,
-0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25,
0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43
])
lowerCAmelCase__ : List[str] = torch.tensor([
0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72,
-0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09,
0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05,
-0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05
])
lowerCAmelCase__ : Union[str, Any] = torch.tensor([
0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33,
-0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95,
0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59,
-0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86
])
lowerCAmelCase__ : List[Any] = torch.tensor([
0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78,
-0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30,
0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83,
-0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31
])
lowerCAmelCase__ : Optional[Any] = torch.tensor([
0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42,
-0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98,
0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74,
-0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90
])
lowerCAmelCase__ : List[str] = torch.tensor([
0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42,
-0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90,
0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46,
-0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73
])
lowerCAmelCase__ : List[str] = torch.tensor([
-1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30,
1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43,
-2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10,
1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51])
lowerCAmelCase__ : List[Any] = torch.tensor([
-1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24,
0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81,
-2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59,
1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66
])
lowerCAmelCase__ : Tuple = torch.tensor([
-1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12,
0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27,
-2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31,
1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55
])
lowerCAmelCase__ : List[str] = torch.tensor([
-2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59,
1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51,
-3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41,
3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66
])
lowerCAmelCase__ : Dict = torch.tensor([
-2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40,
1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98,
-2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95,
2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43
])
lowerCAmelCase__ : Dict = torch.tensor([
-2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36,
1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08,
-3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60,
3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43
])
lowerCAmelCase__ : Any = torch.tensor([
-1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44,
1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91,
-2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39,
1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| 714 | from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
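# Usage sketch: the datasets library registers this builder under the
# "audiofolder" loader name, so a directory of audio files arranged in
# class-named subfolders can be loaded like this (data_dir is a placeholder):
#
#   from datasets import load_dataset
#   dataset = load_dataset("audiofolder", data_dir="/path/to/audio/folder")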
| 699 | 0 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the distance between two points using numpy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the distance between two points without numpy."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )

    benchmark()
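# Quick correctness check (3-4-5 right triangle): both implementations
# return exactly 5.0 for the endpoints (0, 0) and (3, 4):
#
#   assert euclidean_distance_no_np((0, 0), (3, 4)) == 5.0
#   assert float(euclidean_distance((0, 0), (3, 4))) == 5.0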
| 23 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Return the elementwise sigmoid 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Return the sigmoid linear unit (SiLU, also known as swish): x * sigmoid(x)."""
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
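# Usage sketch: sigmoid(0) is exactly 0.5 and the SiLU of 0 is exactly 0,
# which makes for an easy sanity check:
#
#   assert float(sigmoid(np.array([0.0]))[0]) == 0.5
#   assert float(sigmoid_linear_unit(np.array([0.0]))[0]) == 0.0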
| 23 | 1 |
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Check whether ``color`` is valid for a vertex given its already colored neighbours."""
    # Does any neighbour already use this color?
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(
    graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int
) -> bool:
    """Recursively color vertex ``index`` and every vertex after it, backtracking on failure."""
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a coloring using at most ``max_colors`` colors, or an empty list if impossible."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
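# Usage sketch: a triangle (K3, given as an adjacency matrix) needs three
# colors, and the backtracking search assigns the lowest legal color per vertex:
#
#   assert color([[0, 1, 1], [1, 0, 1], [1, 1, 0]], 3) == [0, 1, 2]
#   assert color([[0, 1, 1], [1, 0, 1], [1, 1, 0]], 2) == []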
| 720 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Determine, via collections.Counter, if the string can be rearranged into a palindrome."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Determine if the string can be rearranged into a palindrome:
    at most one character may have an odd count."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0

    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1

    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """Benchmark the two implementations above using timeit."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 630 | 0 |
'''simple docstring'''
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """Wraps a text model's configuration and adds the multimodal fields used by the deprecated MMBT model."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels | 667 |
'''simple docstring'''
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    """Calculate the Surface Area of a Cube."""
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Calculate the Surface Area of a Cuboid."""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """Calculate the Surface Area of a Sphere."""
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Calculate the Surface Area of a Hemisphere (curved surface plus circular base)."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """Calculate the Surface Area of a Cone."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Calculate the Surface Area of a Conical Frustum."""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """Calculate the Surface Area of a Cylinder."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Calculate the Surface Area of a Torus."""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    """Calculate the area of a rectangle."""
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    """Calculate the area of a square."""
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """Calculate the area of a triangle given the base and height."""
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Calculate the area of a triangle from its three sides using Heron's formula."""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    """Calculate the area of a parallelogram."""
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    """Calculate the area of a trapezium."""
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    """Calculate the area of a circle."""
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Calculate the area of an ellipse."""
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Calculate the area of a rhombus."""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """Calculate the area of a regular polygon."""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests
    print("[DEMO] Areas of various geometric shapes: \n")
    print(f"Rectangle: {area_rectangle(10, 20) = }")
    print(f"Square: {area_square(10) = }")
    print(f"Triangle: {area_triangle(10, 10) = }")
    print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
    print(f"Parallelogram: {area_parallelogram(10, 20) = }")
    print(f"Rhombus: {area_rhombus(10, 20) = }")
    print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
    print(f"Circle: {area_circle(20) = }")
    print(f"Ellipse: {area_ellipse(10, 20) = }")
    print("\nSurface Areas of various geometric shapes: \n")
    print(f"Cube: {surface_area_cube(20) = }")
    print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
    print(f"Sphere: {surface_area_sphere(20) = }")
    print(f"Hemisphere: {surface_area_hemisphere(20) = }")
    print(f"Cone: {surface_area_cone(10, 20) = }")
    print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
    print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
    print(f"Torus: {surface_area_torus(20, 10) = }")
    print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
    print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
| 72 | 0 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory function used to convert a model TF 1.0 checkpoint into a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
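# Usage sketch from a shell, once transformers is installed (the checkpoint
# and config paths are placeholders):
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin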
| 718 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    r"""Constructs a LeViT image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
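# Usage sketch (the image path is a placeholder):
#
#   from PIL import Image
#   processor = LevitImageProcessor()
#   image = Image.open("example.jpg")
#   inputs = processor(images=image, return_tensors="pt")
#   # inputs["pixel_values"] has shape (1, 3, 224, 224) with the defaults above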
| 153 | 0 |
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_SCREAMING_SNAKE_CASE = "▁"
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
        # fmt: on
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)

        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")

    @slow
    def test_tokenizer_integration(self):
# fmt: off
_lowerCAmelCase = {"input_ids": [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 18 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
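# Usage sketch: this fast tokenizer is normally registered with the auto
# classes next to its slow counterpart; `CustomConfig` is the companion config
# class assumed from the same custom-tokenizer example.
#
#   from transformers import AutoTokenizer
#   AutoTokenizer.register(
#       CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
#   )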
| 59 | 0 |
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incrementally yield the primes 2, 3, 5, ... using a lazy factor map."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Return the least odd n for which 2 * n * p_n (the prime square remainder) exceeds ``limit``."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
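# Usage sketch: sieve() yields the primes in increasing order, e.g.
#
#   from itertools import islice
#   assert list(islice(sieve(), 5)) == [2, 3, 5, 7, 11]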
| 526 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( )-> str:
A__ = 1_0
A__ = datasets.Features(
{
'''tokens''': datasets.Sequence(datasets.Value('''string''' ) ),
'''labels''': datasets.Sequence(datasets.ClassLabel(names=['''negative''', '''positive'''] ) ),
'''answers''': datasets.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
'''id''': datasets.Value('''int64''' ),
} )
A__ = datasets.Dataset.from_dict(
{
'''tokens''': [['''foo'''] * 5] * n,
'''labels''': [[1] * 5] * n,
'''answers''': [{'''answer_start''': [9_7], '''text''': ['''1976''']}] * 1_0,
'''id''': list(range(UpperCamelCase_ ) ),
} , features=UpperCamelCase_ , )
return dataset
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int )-> Optional[Any]:
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''file.arrow''' )
dataset.map(cache_file_name=UpperCamelCase_ )
return filename
# FILE_CONTENT + files
_lowercase = "\\n Text data.\n Second line of data."
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : str )-> List[Any]:
A__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt'''
A__ = FILE_CONTENT
with open(UpperCamelCase_ , '''w''' ) as f:
f.write(UpperCamelCase_ )
return filename
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[int] )-> Optional[Any]:
import bza
A__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt.bz2'''
A__ = bytes(UpperCamelCase_ , '''utf-8''' )
with bza.open(UpperCamelCase_ , '''wb''' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[Any] )-> int:
import gzip
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' )
A__ = bytes(UpperCamelCase_ , '''utf-8''' )
with gzip.open(UpperCamelCase_ , '''wb''' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : str )-> Any:
if datasets.config.LZ4_AVAILABLE:
import lza.frame
A__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt.lz4'''
A__ = bytes(UpperCamelCase_ , '''utf-8''' )
with lza.frame.open(UpperCamelCase_ , '''wb''' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple )-> int:
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
A__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt.7z'''
with pyazr.SevenZipFile(UpperCamelCase_ , '''w''' ) as archive:
archive.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Tuple , UpperCamelCase_ : int )-> Optional[Any]:
import tarfile
A__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt.tar'''
with tarfile.TarFile(UpperCamelCase_ , '''w''' ) as f:
f.add(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[int] )-> str:
import lzma
A__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt.xz'''
A__ = bytes(UpperCamelCase_ , '''utf-8''' )
with lzma.open(UpperCamelCase_ , '''wb''' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] )-> List[str]:
import zipfile
A__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zip'''
with zipfile.ZipFile(UpperCamelCase_ , '''w''' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Tuple )-> str:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
A__ = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zst'''
A__ = bytes(UpperCamelCase_ , '''utf-8''' )
with zstd.open(UpperCamelCase_ , '''wb''' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[int] )-> int:
A__ = tmp_path_factory.mktemp('''data''' ) / '''file.xml'''
A__ = textwrap.dedent(
'''\
<?xml version="1.0" encoding="UTF-8" ?>
<tmx version="1.4">
<header segtype="sentence" srclang="ca" />
<body>
<tu>
<tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
<tuv xml:lang="en"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
<tuv xml:lang="en"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
<tuv xml:lang="en"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
<tuv xml:lang="en"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
<tuv xml:lang="en"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>''' )
with open(UpperCamelCase_ , '''w''' ) as f:
f.write(UpperCamelCase_ )
return filename
_lowercase = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
_lowercase = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
_lowercase = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
_lowercase = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
_lowercase = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( )-> str:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : List[Any] )-> List[str]:
A__ = datasets.Dataset.from_dict(UpperCamelCase_ )
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.arrow''' )
dataset.map(cache_file_name=UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Dict )-> List[str]:
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.sqlite''' )
with contextlib.closing(sqlitea.connect(UpperCamelCase_ ) ) as con:
A__ = con.cursor()
cur.execute('''CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)''' )
for item in DATA:
cur.execute('''INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)''' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Union[str, Any] )-> Tuple:
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.csv''' )
with open(UpperCamelCase_ , '''w''' , newline='''''' ) as f:
A__ = csv.DictWriter(UpperCamelCase_ , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Dict )-> List[Any]:
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.csv''' )
with open(UpperCamelCase_ , '''w''' , newline='''''' ) as f:
A__ = csv.DictWriter(UpperCamelCase_ , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] )-> List[str]:
import bza
A__ = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.bz2'''
with open(UpperCamelCase_ , '''rb''' ) as f:
A__ = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(UpperCamelCase_ , '''wb''' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] )-> str:
A__ = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(UpperCamelCase_ , '''w''' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : Any )-> List[Any]:
A__ = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(UpperCamelCase_ , '''w''' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.basename(csv_path.replace('''.csv''' , '''.CSV''' ) ) )
f.write(UpperCamelCase_ , arcname=os.path.basename(csva_path.replace('''.csv''' , '''.CSV''' ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] )-> Tuple:
A__ = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.csv.zip'''
with zipfile.ZipFile(UpperCamelCase_ , '''w''' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.join('''main_dir''' , os.path.basename(UpperCamelCase_ ) ) )
f.write(UpperCamelCase_ , arcname=os.path.join('''main_dir''' , os.path.basename(UpperCamelCase_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Dict )-> Optional[int]:
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.parquet''' )
A__ = pa.schema(
{
'''col_1''': pa.string(),
'''col_2''': pa.intaa(),
'''col_3''': pa.floataa(),
} )
with open(UpperCamelCase_ , '''wb''' ) as f:
A__ = pq.ParquetWriter(UpperCamelCase_ , schema=UpperCamelCase_ )
A__ = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(UpperCamelCase_ ) )] for k in DATA[0]} , schema=UpperCamelCase_ )
writer.write_table(UpperCamelCase_ )
writer.close()
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[int] )-> str:
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
A__ = {'''data''': DATA}
with open(UpperCamelCase_ , '''w''' ) as f:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
return path
@pytest.fixture(scope='''session''' )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[int] )-> Optional[Any]:
A__ = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
A__ = {'''data''': DATA_DICT_OF_LISTS}
with open(UpperCamelCase_ , '''w''' ) as f:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
return path
@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def abc_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path


@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path


@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
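

# A minimal sketch of how a test consumes these fixtures: pytest injects each
# prepared file by parameter name (hypothetical test, not part of the original
# suite; `DATA` is the module-level payload defined earlier in this file).
def test_jsonl_fixture_roundtrip(jsonl_path):
    with open(jsonl_path) as f:
        assert [json.loads(line) for line in f] == DATA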
| 526 | 1 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    # overwrite from common since ViTMAEForPreTraining applies random masking;
    # fix the noise so the PT and TF models generate the same masks
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 422 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
'''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AlbertForMaskedLM''',
'''AlbertForMultipleChoice''',
'''AlbertForPreTraining''',
'''AlbertForQuestionAnswering''',
'''AlbertForSequenceClassification''',
'''AlbertForTokenClassification''',
'''AlbertModel''',
'''AlbertPreTrainedModel''',
'''load_tf_weights_in_albert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
'''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAlbertForMaskedLM''',
'''TFAlbertForMultipleChoice''',
'''TFAlbertForPreTraining''',
'''TFAlbertForQuestionAnswering''',
'''TFAlbertForSequenceClassification''',
'''TFAlbertForTokenClassification''',
'''TFAlbertMainLayer''',
'''TFAlbertModel''',
'''TFAlbertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
'''FlaxAlbertForMaskedLM''',
'''FlaxAlbertForMultipleChoice''',
'''FlaxAlbertForPreTraining''',
'''FlaxAlbertForQuestionAnswering''',
'''FlaxAlbertForSequenceClassification''',
'''FlaxAlbertForTokenClassification''',
'''FlaxAlbertModel''',
'''FlaxAlbertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
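
# Usage sketch (illustrative only): with the lazy module installed in
# sys.modules, importing the package stays cheap and each backend loads only on
# first attribute access, e.g.:
#   from transformers.models.albert import AlbertConfig  # loads configuration_albert only
#   from transformers.models.albert import AlbertModel   # pulls in the torch-backed module lazily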
| 422 | 1 |
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    """Build the initial highway: one row of cells, where -1 means an empty cell."""
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    """Count the empty cells between the car at `car_index` and the next car."""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    """Apply one speed-update step to every car on the highway."""
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            distance = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], distance)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    """Run the simulation, appending one new highway row per update step."""
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway
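

# Usage sketch (parameters are illustrative, not from the original module):
#   highway = construct_highway(number_of_cells=100, frequency=10, initial_speed=1)
#   history = simulate(highway, number_of_update=4, probability=0.1, max_speed=5)
#   len(history)  # -> 5: the initial configuration plus one row per update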
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
def binary_xor(a: int, b: int) -> str:
    """Return the bitwise XOR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
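

# Worked example: 25 = 0b11001 and 32 = 0b100000; zero-filled to equal width,
# 011001 XOR 100000 = 111001, so binary_xor(25, 32) == "0b111001".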
if __name__ == "__main__":
import doctest
doctest.testmod()
| 639 | 0 |
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Return sinusoidal positional embeddings for a 1d array of timesteps."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    """Learned two-layer projection of the sinusoidal timestep embedding."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wrapper module for the sinusoidal timestep embeddings."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
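

# Usage sketch (illustrative): embedding a batch of 4 timesteps into 32 dims.
#   timesteps = jnp.array([0, 1, 2, 3])
#   emb = get_sinusoidal_embeddings(timesteps, embedding_dim=32)
#   emb.shape  # -> (4, 32): 16 sine channels concatenated with 16 cosine channels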
| 562 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(
            vision_config=vision_config,
        )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(
            vision_config=vision_config,
        )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)

    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)

        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
lowercase_ = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
lowercase_ = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
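
# Example invocation (sketch; the script filename is assumed, the flags are the
# ones defined above):
#   python convert_sam_to_hf.py --model_name sam_vit_b_01ec64 --pytorch_dump_folder_path ./sam-vit-base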
| 562 | 1 |
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class lowercase( yaml.SafeLoader ):
'''simple docstring'''
def UpperCamelCase_ ( self: Union[str, Any], a_: Any ):
'''simple docstring'''
_snake_case : str = [self.constructed_objects[key_node] for key_node, _ in node.value]
_snake_case : Any = [tuple(a_ ) if isinstance(a_, a_ ) else key for key in keys]
_snake_case : Optional[int] = Counter(a_ )
_snake_case : Dict = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}" )
def UpperCamelCase_ ( self: Any, a_: List[Any], a_: List[str]=False ):
'''simple docstring'''
_snake_case : Any = super().construct_mapping(a_, deep=a_ )
self._check_no_duplicates_on_constructed_node(a_ )
return mapping
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : List[str] = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
_snake_case : List[Any] = full_content[1:].index("""---""" ) + 1
_snake_case : Dict = """\n""".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(snake_case__ )
class lowercase( __a ):
'''simple docstring'''
lowercase__ = {"train_eval_index"} # train-eval-index in the YAML metadata
@classmethod
def UpperCamelCase_ ( cls: List[Any], a_: Path ):
'''simple docstring'''
with open(a_, encoding="""utf-8""" ) as readme_file:
_snake_case , _snake_case : List[Any] = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(a_ )
else:
return cls()
def UpperCamelCase_ ( self: Tuple, a_: Path ):
'''simple docstring'''
if path.exists():
with open(a_, encoding="""utf-8""" ) as readme_file:
_snake_case : List[str] = readme_file.read()
else:
_snake_case : Dict = None
_snake_case : List[str] = self._to_readme(a_ )
with open(a_, """w""", encoding="""utf-8""" ) as readme_file:
readme_file.write(a_ )
def UpperCamelCase_ ( self: Optional[Any], a_: Optional[str] = None ):
'''simple docstring'''
if readme_content is not None:
_snake_case , _snake_case : Union[str, Any] = _split_yaml_from_readme(a_ )
_snake_case : str = """---\n""" + self.to_yaml_string() + """---\n""" + content
else:
_snake_case : Any = """---\n""" + self.to_yaml_string() + """---\n"""
return full_content
@classmethod
def UpperCamelCase_ ( cls: Dict, a_: str ):
'''simple docstring'''
_snake_case : int = yaml.load(a_, Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
_snake_case : Tuple = {
(key.replace("""-""", """_""" ) if key.replace("""-""", """_""" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace("""_""", """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
}, sort_keys=a_, allow_unicode=a_, encoding="""utf-8""", ).decode("""utf-8""" )
known_task_ids = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
A_ = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
A_ = ap.parse_args()
A_ = Path(args.readme_filepath)
A_ = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
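
# Usage sketch (illustrative, no file I/O): round-tripping a metadata block.
#   metadata = DatasetMetadata.from_yaml_string("language:\n- en\n")
#   metadata["license"] = ["mit"]
#   print(metadata.to_yaml_string())  # -> "language:\n- en\nlicense:\n- mit\n"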
| 28 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A_ = '''pt'''
elif is_tf_available():
A_ = '''tf'''
else:
A_ = '''jax'''
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    # There is a conflict between the default value of extra_ids and adding a new special token through
    # additional_special_tokens; the extra_ids need to be added to the additional_special_tokens list as well
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=False)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests use invalid tokens for ByT5, which can only accept
        # one-character strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    # We need a different implementation of the test of the same name defined in TokenizerTesterMixin
    # because this tokenizer doesn't have a vocab
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
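
    # Note on the expected ids above: ByT5 ids are raw UTF-8 bytes offset by the
    # three special tokens (pad=0, eos=1, unk=2); e.g. "U" (0x55 = 85) maps to 88
    # and every "</s>" decodes from id 1, consistent with the test vectors.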
| 28 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
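

# Usage sketch (illustrative): the defaults reproduce the KITTI-sized encoder.
#   from transformers import GLPNConfig, GLPNForDepthEstimation
#   config = GLPNConfig()                   # 4 encoder blocks, hidden sizes 32/64/160/256
#   model = GLPNForDepthEstimation(config)  # randomly initialised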
| 648 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
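

# Worked example for 2024: metonic_cycle = 10, days_to_add = (19 * 10 + 24) % 30 = 4
# and days_from_phm_to_sunday = 5, so Easter = March 22 + 9 days = March 31, 2024.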
if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 648 | 1 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def UpperCamelCase( lowercase_ ) -> List[Any]:
'''simple docstring'''
random.seed(lowercase_ )
np.random.seed(lowercase_ )
torch.manual_seed(lowercase_ )
torch.cuda.manual_seed_all(lowercase_ )
# ^^ safe to call this function even if cuda is not available
class __lowerCamelCase :
def __init__( self , lowerCamelCase , lowerCamelCase = 0.9999 , lowerCamelCase = 0.0 , lowerCamelCase = 0 , lowerCamelCase = False , lowerCamelCase = 1.0 , lowerCamelCase = 2 / 3 , lowerCamelCase = None , lowerCamelCase = None , **lowerCamelCase , ) -> Optional[Any]:
if isinstance(lowerCamelCase , torch.nn.Module ):
snake_case_ = (
"""Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """
"""Please pass the parameters of the module instead."""
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , lowerCamelCase , standard_warn=lowerCamelCase , )
snake_case_ = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
snake_case_ = True
if kwargs.get("""max_value""" , lowerCamelCase ) is not None:
snake_case_ = """The `max_value` argument is deprecated. Please use `decay` instead."""
deprecate("""max_value""" , """1.0.0""" , lowerCamelCase , standard_warn=lowerCamelCase )
snake_case_ = kwargs["""max_value"""]
if kwargs.get("""min_value""" , lowerCamelCase ) is not None:
snake_case_ = """The `min_value` argument is deprecated. Please use `min_decay` instead."""
deprecate("""min_value""" , """1.0.0""" , lowerCamelCase , standard_warn=lowerCamelCase )
snake_case_ = kwargs["""min_value"""]
snake_case_ = list(lowerCamelCase )
snake_case_ = [p.clone().detach() for p in parameters]
if kwargs.get("""device""" , lowerCamelCase ) is not None:
snake_case_ = """The `device` argument is deprecated. Please use `to` instead."""
deprecate("""device""" , """1.0.0""" , lowerCamelCase , standard_warn=lowerCamelCase )
self.to(device=kwargs["""device"""] )
snake_case_ = None
snake_case_ = decay
snake_case_ = min_decay
snake_case_ = update_after_step
snake_case_ = use_ema_warmup
snake_case_ = inv_gamma
snake_case_ = power
snake_case_ = 0
snake_case_ = None # set in `step()`
snake_case_ = model_cls
snake_case_ = model_config
@classmethod
def lowerCAmelCase_ ( cls , lowerCamelCase , lowerCamelCase ) -> "EMAModel":
snake_case_ , snake_case_ = model_cls.load_config(lowerCamelCase , return_unused_kwargs=lowerCamelCase )
snake_case_ = model_cls.from_pretrained(lowerCamelCase )
snake_case_ = cls(model.parameters() , model_cls=lowerCamelCase , model_config=model.config )
ema_model.load_state_dict(lowerCamelCase )
return ema_model
def lowerCAmelCase_ ( self , lowerCamelCase ) -> List[str]:
if self.model_cls is None:
raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
if self.model_config is None:
raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
        model = self.model_cls.from_config(self.model_config )
        state_dict = self.state_dict()
        state_dict.pop("""shadow_params""" , None )
        model.register_to_config(**state_dict )
        self.copy_to(model.parameters() )
        model.save_pretrained(lowerCamelCase )
    def get_decay( self , optimization_step ) -> float:
        step = max(0 , optimization_step - self.update_after_step - 1 )
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value , self.decay )
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value , self.min_decay )
        return cur_decay_value
    @torch.no_grad()
    def step( self , parameters ):
        if isinstance(parameters , torch.nn.Module ):
            deprecation_message = (
                """Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """
                """Please pass the parameters of the module instead."""
            )
            deprecate(
                """passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , deprecation_message , standard_warn=False , )
            parameters = parameters.parameters()
        parameters = list(parameters )
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step )
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params , parameters ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param , modifier_rank=None )
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param) )
                else:
                    s_param.copy_(param )
    def copy_to( self , parameters ) -> None:
        parameters = list(parameters )
        for s_param, param in zip(self.shadow_params , parameters ):
            param.data.copy_(s_param.to(param.device ).data )
    def to( self , device=None , dtype=None ) -> None:
        self.shadow_params = [
            p.to(device=device , dtype=dtype ) if p.is_floating_point() else p.to(device=device )
            for p in self.shadow_params
        ]
    def state_dict( self ) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }
    def store( self , parameters ) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]
    def restore( self , parameters ) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" )
        for c_param, param in zip(self.temp_stored_params , parameters ):
            param.data.copy_(c_param.data )
        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict( self , state_dict ) -> None:
        state_dict = copy.deepcopy(state_dict )
        self.decay = state_dict.get("""decay""" , self.decay )
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("""Decay must be between 0 and 1""" )
        self.min_decay = state_dict.get("""min_decay""" , self.min_decay )
        if not isinstance(self.min_decay , float ):
            raise ValueError("""Invalid min_decay""" )
        self.optimization_step = state_dict.get("""optimization_step""" , self.optimization_step )
        if not isinstance(self.optimization_step , int ):
            raise ValueError("""Invalid optimization_step""" )
        self.update_after_step = state_dict.get("""update_after_step""" , self.update_after_step )
        if not isinstance(self.update_after_step , int ):
            raise ValueError("""Invalid update_after_step""" )
        self.use_ema_warmup = state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
        if not isinstance(self.use_ema_warmup , bool ):
            raise ValueError("""Invalid use_ema_warmup""" )
        self.inv_gamma = state_dict.get("""inv_gamma""" , self.inv_gamma )
        if not isinstance(self.inv_gamma , (float, int) ):
            raise ValueError("""Invalid inv_gamma""" )
        self.power = state_dict.get("""power""" , self.power )
        if not isinstance(self.power , (float, int) ):
            raise ValueError("""Invalid power""" )
        shadow_params = state_dict.get("""shadow_params""" , None )
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params , list ):
                raise ValueError("""shadow_params must be a list""" )
            if not all(isinstance(p , torch.Tensor ) for p in self.shadow_params ):
                raise ValueError("""shadow_params must all be Tensors""" ) | 161 |
from typing import Any
def UpperCamelCase( lowercase_ ) -> list[Any]:
'''simple docstring'''
    if not lowercase_:
        return []
    result = [lowercase_.count(value) for value in lowercase_]
    y = max(result )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({lowercase_[i] for i, value in enumerate(result ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod() | 161 | 1 |
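A quick usage example for the mode helper above (the function name is kept as it appears in this file):

print(UpperCamelCase([2, 3, 4, 5, 3, 4]))  # [3, 4] -- both appear twice
print(UpperCamelCase([]))                  # []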
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class AudioClassification( TaskTemplate ):
    '''simple docstring'''
    task: str = field(default="""audio-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    input_schema: ClassVar[Features] = Features({"""audio""": Audio()} )
    label_schema: ClassVar[Features] = Features({"""labels""": ClassLabel} )
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features( self , features ):
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["""labels"""] = features[self.label_column]
        task_template.__dict__["""label_schema"""] = label_schema
        return task_template
    @property
    def column_mapping( self ):
'''simple docstring'''
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 184 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_audio_spectrogram_transformer"""] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_audio_spectrogram_transformer"""] = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 184 | 1 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class lowercase__ (unittest.TestCase ):
"""simple docstring"""
    def _get_tensors( self , length ):
        batch_size = 3
        vocab_size = 2_5_0
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
    def test_list_criteria( self ):
        input_ids , scores = self._get_tensors(5 )
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=1_0 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(1_0 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_length_criteria( self ):
        criteria = MaxLengthCriteria(max_length=1_0 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(1_0 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_new_tokens_criteria( self ):
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(1_0 )
        self.assertTrue(criteria(input_ids , scores ) )
        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 1_0 )
    def test_max_time_criteria( self ):
        input_ids , scores = self._get_tensors(5 )
        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )
        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_validate_stopping_criteria( self ):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 )
        with self.assertWarns(UserWarning ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 )
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 1_1 )
        self.assertEqual(len(stopping_criteria ) , 1 )
| 127 |
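The criteria exercised by these tests plug straight into generate. A minimal sketch, assuming the public gpt2 checkpoint (any causal LM works):

from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=2.0)])
out = model.generate(**tok("Hello", return_tensors="pt"), stopping_criteria=criteria)
print(tok.decode(out[0]))  # stops at 20 tokens or after ~2 seconds, whichever comes first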
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/transformers/__init__.py',
    'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file(fname , version , pattern):
    """simple docstring"""
    with open(fname , """r""" , encoding="""utf-8""" , newline="""\n""") as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("""VERSION""" , version)
    code = re_pattern.sub(replace , code)
    with open(fname , """w""" , encoding="""utf-8""" , newline="""\n""") as f:
        f.write(code)
def update_version_in_examples(version):
    """simple docstring"""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("""research_projects""")
        if "legacy" in directories:
            directories.remove("""legacy""")
        for fname in fnames:
            if fname.endswith(""".py"""):
                update_version_in_file(os.path.join(folder , fname) , version , pattern="""examples""")
def global_version_update(version , patch=False):
    """simple docstring"""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """simple docstring"""
    _start_prompt = """🤗 Transformers currently provides the following architectures"""
    _end_prompt = """1. Want to contribute a new model?"""
    with open(README_FILE , """r""" , encoding="""utf-8""" , newline="""\n""") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("""1."""):
            lines[index] = lines[index].replace(
                """https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
        index += 1
    with open(README_FILE , """w""" , encoding="""utf-8""" , newline="""\n""") as f:
        f.writelines(lines)
def get_version():
    """simple docstring"""
    with open(REPLACE_FILES["""init"""] , """r""") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["""init"""][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """simple docstring"""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        default_version = F'{default_version.major}.{default_version.minor + 1}.0'
    # Now let's ask nicely if that's the right one.
    version = input(F'Which version are you releasing? [{default_version}]')
    if len(version) == 0:
        version = default_version
    print(F'Updating version to {version}.')
    global_version_update(version , patch=patch)
    if not patch:
        print("""Cleaning main README, don't forget to run `make fix-copies`.""")
        clean_main_ref_in_model_list()
def post_release_work():
    """simple docstring"""
    current_version = get_version()
    dev_version = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F'Which version are we developing now? [{dev_version}]')
    if len(version) == 0:
        version = dev_version
    print(F'Updating version to {version}.')
    global_version_update(version)
    print("""Cleaning main README, don't forget to run `make fix-copies`.""")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
    parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 127 | 1 |
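To see what the REPLACE_PATTERNS entries above actually do, here is the 'init' pattern applied to a one-line file body:

import re
pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
template = '__version__ = "VERSION"\n'
code = '__version__ = "4.30.0.dev0"\n'
print(pattern.sub(template.replace("VERSION", "4.30.0"), code), end="")  # __version__ = "4.30.0"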
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(1_0)}
def digit_factorial_sum( number: int ) -> int:
    '''simple docstring'''
    if not isinstance(number , int ):
        raise TypeError("""Parameter number must be int""" )
    if number < 0:
        raise ValueError("""Parameter number must be greater than or equal to 0""" )
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )
def solution( chain_length: int = 60 , number_limit: int = 1_00_00_00 ) -> int:
    '''simple docstring'''
    if not isinstance(chain_length , int ) or not isinstance(number_limit , int ):
        raise TypeError("""Parameters chain_length and number_limit must be int""" )
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            """Parameters chain_length and number_limit must be greater than 0""" )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1 , number_limit ):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element )
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element )
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution()}''')
| 12 |
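A worked example of the chain logic in solution above: starting from 69, repeated digit-factorial sums produce 69 -> 363600 -> 1454 -> 169 -> 363601 -> 1454 ..., so the non-repeating chain has 5 terms.

n = 69
seen = []
while n not in seen:
    seen.append(n)
    n = digit_factorial_sum(n)
print(seen)       # [69, 363600, 1454, 169, 363601]
print(len(seen))  # 5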
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _snake_case ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
        self.word_shape_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""])
        self.word_pronunciation_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""])
        with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
        with open(self.word_shape_file , """w""" , encoding="""utf-8""") as word_shape_writer:
            json.dump(word_shape , word_shape_writer , ensure_ascii=False)
        with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""") as word_pronunciation_writer:
            json.dump(word_pronunciation , word_pronunciation_writer , ensure_ascii=False)
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
        tokens = tokenizer.tokenize("""你好[SEP]你是谁""")
        self.assertListEqual(tokens , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens) , [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens) , [5, 6, 2, 5, 7, 8])
    def test_chinese( self ):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""") , ["""ah""", """\u535A""", """\u63A8""", """zz"""])
    def test_basic_tokenizer_lower( self ):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""hello""", """!""", """how""", """are""", """you""", """?"""])
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
    def test_basic_tokenizer_lower_strip_accents_false( self ):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True , strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""])
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""h\u00E9llo"""])
    def test_basic_tokenizer_lower_strip_accents_true( self ):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True , strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
    def test_basic_tokenizer_lower_strip_accents_default( self ):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
    def test_basic_tokenizer_no_lower( self ):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
    def test_basic_tokenizer_no_lower_strip_accents_false( self ):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False , strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
    def test_basic_tokenizer_no_lower_strip_accents_true( self ):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False , strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
    def test_basic_tokenizer_respects_never_split_tokens( self ):
        '''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False , never_split=["""[UNK]"""])
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""") , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""])
    def test_wordpiece_tokenizer( self ):
        '''simple docstring'''
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab , unk_token="""[UNK]""")
        self.assertListEqual(tokenizer.tokenize("""""") , [])
        self.assertListEqual(tokenizer.tokenize("""unwanted running""") , ["""un""", """##want""", """##ed""", """runn""", """##ing"""])
        self.assertListEqual(tokenizer.tokenize("""unwantedX running""") , ["""[UNK]""", """runn""", """##ing"""])
    def test_is_whitespace( self ):
        '''simple docstring'''
        self.assertTrue(_is_whitespace(""" """))
        self.assertTrue(_is_whitespace("""\t"""))
        self.assertTrue(_is_whitespace("""\r"""))
        self.assertTrue(_is_whitespace("""\n"""))
        self.assertTrue(_is_whitespace("""\u00A0"""))
        self.assertFalse(_is_whitespace("""A"""))
        self.assertFalse(_is_whitespace("""-"""))
    def test_is_control( self ):
        '''simple docstring'''
        self.assertTrue(_is_control("""\u0005"""))
        self.assertFalse(_is_control("""A"""))
        self.assertFalse(_is_control(""" """))
        self.assertFalse(_is_control("""\t"""))
        self.assertFalse(_is_control("""\r"""))
    def test_is_punctuation( self ):
        '''simple docstring'''
        self.assertTrue(_is_punctuation("""-"""))
        self.assertTrue(_is_punctuation("""$"""))
        self.assertTrue(_is_punctuation("""`"""))
        self.assertTrue(_is_punctuation("""."""))
        self.assertFalse(_is_punctuation("""A"""))
        self.assertFalse(_is_punctuation(""" """))
    def test_clean_text( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]])
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]])
    def test_offsets_with_special_characters( self ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                sentence = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , """do_lower_case""") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""]))
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""])
    def test_change_tokenize_chinese_chars( self ):
        '''simple docstring'''
        list_of_commun_chinese_char = ["""的""", """人""", """有"""]
        text_with_chinese_char = """""".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                kwargs["""tokenize_chinese_chars"""] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char)
                kwargs["""tokenize_chinese_chars"""] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f'##{token}' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens)
@slow
    def test_sequence_builders( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
        text = tokenizer.encode("""你好""" , add_special_tokens=False)
        text_a = tokenizer.encode("""你是谁""" , add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]
    def test_prepare_for_model( self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                string_sequence = """你好,你是谁"""
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids , tokens_shape_ids , tokens_proun_ids , add_special_tokens=True)
                input_dict = tokenizer.encode_plus(string_sequence , add_special_tokens=True)
                self.assertEqual(input_dict , prepared_input_dict)
| 12 | 1 |
from __future__ import annotations
from random import random
class a_ :
"""simple docstring"""
def __init__( self : Union[str, Any] ,snake_case : int | None = None ):
SCREAMING_SNAKE_CASE =value
SCREAMING_SNAKE_CASE =random()
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
def __repr__( self : str ):
from pprint import pformat
if self.left is None and self.right is None:
return f'\'{self.value}: {self.prior:.5}\''
else:
return pformat(
{f'{self.value}: {self.prior:.5}': (self.left, self.right)} ,indent=1 )
def __str__( self : List[Any] ):
SCREAMING_SNAKE_CASE =str(self.value ) + ' '
SCREAMING_SNAKE_CASE =str(self.left or '' )
SCREAMING_SNAKE_CASE =str(self.right or '' )
return value + left + right
def split( root: Node | None , value: int ) -> tuple[Node | None, Node | None]:
    """simple docstring"""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left , root.left = split(root.left , value )
            return left, root
        else:
            root.right , right = split(root.right , value )
            return root, right
def merge( left: Node | None , right: Node | None ) -> Node | None:
    """simple docstring"""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right , right )
        return left
    else:
        right.left = merge(left , right.left )
        return right
def insert( root: Node | None , value: int ) -> Node | None:
    """simple docstring"""
    node = Node(value )
    left , right = split(root , value )
    return merge(merge(left , node ) , right )
def erase( root: Node | None , value: int ) -> Node | None:
    """simple docstring"""
    left , right = split(root , value - 1 )
    _ , right = split(right , value )
    return merge(left , right )
def inorder( root: Node | None ) -> None:
    """simple docstring"""
    if not root:  # None
        return
    else:
        inorder(root.left )
        print(root.value , end=',' )
        inorder(root.right )
def interact_treap( root: Node | None , args: str ) -> Node | None:
    """simple docstring"""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
        else:
            print('Unknown command' )
    return root
def main() -> None:
    """simple docstring"""
    root = None
    print(
        'enter numbers to create a tree, + value to add value into treap, '
        '- value to erase all nodes with value. \'q\' to quit. ' )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print('good bye!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 252 |
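A non-interactive run of the treap operations above:

root = None
for v in (5, 3, 9, 3):
    root = insert(root, v)
root = erase(root, 3)  # erase removes every node holding the value 3
inorder(root)          # prints 5,9, -- the remaining values in sorted order
print()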
def cramers_rule_2x2( equation1: list , equation2: list ) -> tuple[float, float]:
    """simple docstring"""
    if not len(equation1 ) == len(equation2 ) == 3:
        raise ValueError('Please enter a valid equation.' )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.' )
    # Extract the coefficients
    a1 , b1 , c1 = equation1
    a2 , b2 , c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)' )
        else:
            raise ValueError('No solution. (Inconsistent system)' )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
| 252 | 1 |
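A worked example for the solver above: the system x + 2y = 3 and 2x + y = 3 (each equation given as [a, b, c] for ax + by = c) has determinant 1*1 - 2*2 = -3, det_x = 3*1 - 3*2 = -3 and det_y = 1*3 - 2*3 = -3, so x = y = 1.

print(cramers_rule_2x2([1, 2, 3], [2, 1, 3]))  # (1.0, 1.0)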
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec('s3fs') is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(F"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str ) -> str:
    '''simple docstring'''
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://" )[1]
    return dataset_path
def is_remote_filesystem(fs ) -> bool:
    '''simple docstring'''
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs , src: str , dst: str ) -> None:
    '''simple docstring'''
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
def _reset_fsspec_lock() -> None:
    '''simple docstring'''
    if hasattr(fsspec.asyn , "reset_lock" ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 321 |
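The URI helper above simply strips the protocol prefix:

print(extract_path_from_uri("s3://my-bucket/datasets/train"))  # my-bucket/datasets/train
print(extract_path_from_uri("relative/path"))                  # unchanged, no protocol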
import math
class Graph :
    def __init__( self , n=0 ):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j
        for i in range(0 , n ):
            self.dp[i][i] = 0  # the distance from a node to itself is zero
    def add_edge( self , u , v , w ):
        self.dp[u][v] = w
    def floyd_warshall( self ):
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min( self , u , v ):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 684 | 0 |
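Floyd-Warshall runs in O(n^3) time and O(n^2) space. A tiny extra check on a 3-node cycle:

g = Graph(3)
g.add_edge(0, 1, 1)
g.add_edge(1, 2, 2)
g.add_edge(2, 0, 4)
g.floyd_warshall()
print(g.show_min(0, 2))  # 3, via 0 -> 1 -> 2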
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj :
    def __init__( self , module , attrs=None ):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith('__' ):
                    setattr(self , key , getattr(module , key ) )
        self._original_module = module._original_module if isinstance(module , _PatchedModuleObj ) else module
class patch_submodule :
    _active_patches = []
    def __init__( self , obj , target , new , attrs=None ):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split('.' )[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__( self ):
        *submodules , target_attr = self.target.split('.' )
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
            try:
                submodule = import_module('.'.join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj , attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    (isinstance(obj_attr , _PatchedModuleObj ) and obj_attr._original_module is submodule)
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj , attr , _PatchedModuleObj(obj_attr , attrs=self.attrs ) )
                    patched = getattr(self.obj , attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched , key , _PatchedModuleObj(getattr(patched , key , None ) , attrs=self.attrs ) )
                        patched = getattr(patched , key )
                    # finally set the target attribute
                    setattr(patched , target_attr , self.new )
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module('.'.join(submodules ) ) , target_attr )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , attr ) is attr_value:
                    self.original[attr] = getattr(self.obj , attr )
                    setattr(self.obj , attr , self.new )
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()['__builtins__'][target_attr]
            setattr(self.obj , target_attr , self.new )
        else:
            raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""" )
    def __exit__( self , *exc_info ):
        for attr in list(self.original ):
            setattr(self.obj , attr , self.original.pop(attr ) )
    def start( self ):
        self.__enter__()
        self._active_patches.append(self )
    def stop( self ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
| 494 |
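A minimal sketch of how the patcher above can be used; the demo module is built on the fly rather than imported, so nothing here depends on a real package layout:

import os
import types

demo_mod = types.ModuleType("demo_mod")
demo_mod.os = os  # stands in for a module that did `import os`

with patch_submodule(demo_mod, "os.path.join", lambda *p: "/".join(p)):
    print(demo_mod.os.path.join("a", "b"))  # a/b, served by the fake join
print(os.path.join("a", "b"))  # the real os module is untouched outside the context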
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest ( unittest.TestCase ):
    def test_top_k_top_p_filtering( self ):
        logits = tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
], # cummulative prob of 5 highest values <= 0.6
            ] , dtype=tf.float32 , )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.int32 , )  # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.float32 , )  # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
        non_inf_output = output[output != -float('inf' )]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output , tf.constant(-float('inf' ) , dtype=tf.float32 ) ) ) , dtype=tf.int32 , )
        tf.debugging.assert_near(non_inf_output , non_inf_expected_output , rtol=1E-12 )
        tf.debugging.assert_equal(non_inf_idx , non_inf_expected_idx )
@require_tf
class TFGenerationIntegrationTests ( unittest.TestCase , GenerationIntegrationTestsMixin ):
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
            """AutoModelForCausalLM""": TFAutoModelForCausalLM,
            """AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeq2Seq,
            """AutoModelForSeq2SeqLM""": TFAutoModelForSeq2SeqLM,
            """AutoModelForVision2Seq""": TFAutoModelForVision2Seq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
    def test_generate_tf_function_export_fixed_input_length( self ):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        input_length = 2
        max_new_tokens = 2
        class DummyModel ( tf.Module ):
            def __init__( self , model ):
                super(DummyModel , self ).__init__()
                self.model = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length) , tf.int32 , name='input_ids' ),
                    tf.TensorSpec((None, input_length) , tf.int32 , name='attention_mask' ),
                ) , jit_compile=True , )
            def serving( self , input_ids , attention_mask ):
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2, 0], [1_02, 1_03]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={'serving_default': dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures['serving_default']
            for batch_size in range(1 , len(dummy_input_ids ) + 1 ):
                inputs = {
                    'input_ids': tf.constant(dummy_input_ids[:batch_size] ),
                    'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ),
                }
                tf_func_outputs = serving_func(**inputs )['sequences']
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
    def test_generate_tf_function_export_fixed_batch_size( self ):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        batch_size = 1
        max_new_tokens = 2
        class DummyModel ( tf.Module ):
            def __init__( self , model ):
                super(DummyModel , self ).__init__()
                self.model = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None) , tf.int32 , name='input_ids' ),
                    tf.TensorSpec((batch_size, None) , tf.int32 , name='attention_mask' ),
                ) , jit_compile=True , )
            def serving( self , input_ids , attention_mask ):
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2], [1_02, 1_03]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={'serving_default': dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures['serving_default']
            for input_row in range(len(dummy_input_ids ) ):
                inputs = {
                    'input_ids': tf.constant([dummy_input_ids[input_row]] ),
                    'attention_mask': tf.constant([dummy_attention_masks[input_row]] ),
                }
                tf_func_outputs = serving_func(**inputs )['sequences']
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
@require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer( self ):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=tmp_dir )
            class CompleteSentenceTransformer ( tf.keras.layers.Layer ):
                def __init__( self ):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir , 'spiece.model' ) , 'rb' ).read() )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' )
                def call( self , inputs , *args , **kwargs ):
                    tokens = self.tokenizer.tokenize(inputs )
                    input_ids , attention_mask = text.pad_model_inputs(
                        tokens , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
                    outputs = self.model.generate(input_ids=input_ids , attention_mask=attention_mask )
                    return self.tokenizer.detokenize(outputs )
            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs' )
            outputs = complete_model(inputs )
            keras_model = tf.keras.Model(inputs , outputs )
            keras_model.save(tmp_dir )
    def test_eos_token_id_int_and_list_top_k_top_sampling( self ):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            'do_sample': True,
            'num_beams': 1,
            'top_p': 0.7,
            'top_k': 10,
            'temperature': 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        sentence = 'Hello, my dog is cute and'
        input_ids = tokenizer(sentence , return_tensors='tf' )
        model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        eos_token_id = 6_38
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(':/CPU:0' ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**input_ids , eos_token_id=eos_token_id , **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
        eos_token_id = [6_38, 1_98]
        with tf.device(':/CPU:0' ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**input_ids , eos_token_id=eos_token_id , **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
    def test_model_kwarg_encoder_signature_filtering( self ):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' )
        article = 'Hugging Face is a technology company based in New York and Paris.'
        input_ids = bart_tokenizer(article , return_tensors='tf' ).input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' )
        output = bart_model.generate(input_ids ).numpy()
        class FakeBart ( TFBartForConditionalGeneration ):
            def call( self , input_ids , foo=None , **kwargs ):
                return super().call(input_ids , **kwargs )
        bart_model = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' )
        fake_output = bart_model.generate(input_ids , foo='bar' ).numpy()
        self.assertTrue(np.array_equal(output , fake_output ) )
        class FakeEncoder ( bart_model.model.encoder.__class__ ):
            def call( self , input_ids , **kwargs ):
                return super().call(input_ids , **kwargs )
        fake_encoder = FakeEncoder(bart_model.config , bart_model.model.shared )
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids ).numpy()
        with self.assertRaises(ValueError ):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids , foo='bar' )
| 494 | 1 |
'''simple docstring'''
def __lowerCamelCase ( string1: str , string2: str ) -> int:
    if len(string1 ) != len(string2 ):
        raise ValueError('String lengths must match!' )
    count = 0
    for char1, char2 in zip(string1 , string2 ):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368 |
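A quick check of the Hamming distance above (the function name is kept as it appears in this file):

print(__lowerCamelCase("karolin", "kathrin"))  # 3 -- positions 2, 3 and 4 differ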
'''simple docstring'''
def solution():
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
| 675 | 0 |
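The same count can be cross-checked independently with the standard library, which confirms the well-known answer of 171:

from datetime import date
count = sum(
    1
    for y in range(1901, 2001)
    for m in range(1, 13)
    if date(y, m, 1).weekday() == 6  # Monday is 0, so Sunday is 6
)
print(count)  # 171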
"""simple docstring"""
import functools
from typing import Any
def snake_case ( string: str , words: list[str] ) -> bool:
    # Validation
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError("the string should be a non-empty string" )
    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError("the words should be a list of non-empty strings" )
    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string )
    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int ) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 463 |
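Two classic word-break cases for the function above (the name snake_case is kept as it appears in this file):

print(snake_case("applepenapple", ["apple", "pen"]))                   # True
print(snake_case("catsandog", ["cats", "dog", "sand", "and", "cat"]))  # False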
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig (PretrainedConfig ):
    model_type = '''levit'''
    def __init__( self , image_size=224 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.0_2 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig (OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1e-4
| 463 | 1 |
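A short sketch instantiating the config above and inspecting the derived downsampling ops:

config = LevitConfig()
print(config.hidden_sizes)  # [128, 256, 384]
print(config.down_ops[0])   # ['Subsample', 16, 8, 4, 2, 2]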
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel (k_size: int , sigma: float ):
    center = k_size // 2
    x , y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x ) + square(y )) / (2 * square(sigma )) )
    return g
def gaussian_filter (image , k_size: int , sigma: float ):
    height , width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size) )
    row = 0
    for i, j in product(range(dst_height ) , range(dst_width ) ):
        window = ravel(image[i : i + k_size, j : j + k_size] )
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size , sigma )
    filter_array = ravel(gaussian_kernel )
    # reshape and get the dst image
    dst = dot(image_array , filter_array ).reshape(dst_height , dst_width ).astype(uint8 )
    return dst
if __name__ == "__main__":
    # read original image
    img = imread(R'''../image_data/lena.jpg''')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow('''gaussian filter with 3x3 mask''', gaussian3x3)
    imshow('''gaussian filter with 5x5 mask''', gaussian5x5)
    waitKey()
| 4 |
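The kernel generator above can be inspected directly. Note that with this 1/(2*pi*sigma) normalization the truncated 3x3 kernel does not sum to exactly 1:

kernel = gen_gaussian_kernel(3, 1)
print(kernel.shape)            # (3, 3)
print(round(kernel.sum(), 2))  # ~0.78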
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class DatasetIndexingTest ( TestCase ):
    def _create_dummy_dataset( self ):
        """simple docstring"""
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(30 ).tolist()]} )
        return dset
    def test_add_faiss_index( self ):
        """simple docstring"""
        import faiss
        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.float32 )} , with_indices=True , keep_in_memory=True )
        dset = dset.add_faiss_index('vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
        scores , examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
    def test_add_faiss_index_from_external_arrays( self ):
        """simple docstring"""
        import faiss
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores , examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
    def test_serialization( self ):
        """simple docstring"""
        import faiss
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            dset.save_faiss_index('vecs' , tmp_file.name )
            dset.load_faiss_index('vecs2' , tmp_file.name )
        os.unlink(tmp_file.name )
        scores , examples = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
    def test_drop_index( self ):
        """simple docstring"""
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
        dset.drop_index('vecs' )
        self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.float32 ) ) )
    def test_add_elasticsearch_index( self ):
        """simple docstring"""
        from elasticsearch import Elasticsearch
        dset = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value([(True, None)] * 30 )
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index('filename' , es_client=es_client )
            scores , examples = dset.get_nearest_examples('filename' , 'my_name-train_29' )
            self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertRaises(_snake_case , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCAmelCase = np.eye(5 , dtype=np.floataa )[::-1]
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case )
self.assertRaises(_snake_case , index.search_batch , queries[0] )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCAmelCase = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(_snake_case ):
lowerCAmelCase = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = faiss.IndexFlat(5 )
lowerCAmelCase = FaissIndex(custom_index=_snake_case )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file:
index.save(tmp_file.name )
lowerCAmelCase = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict ):
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCAmelCase = 'index.faiss'
lowerCAmelCase = F'mock://{index_name}'
index.save(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCAmelCase = FaissIndex.load(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_UpperCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCAmelCase = Elasticsearch()
lowerCAmelCase = {'acknowledged': True}
lowerCAmelCase = ElasticSearchIndex(es_client=_snake_case )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
lowerCAmelCase = 'foo'
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCAmelCase = 'foo'
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCAmelCase = ['foo', 'bar', 'foobar']
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([1, 1, 1] , _snake_case )
# batched queries with timeout
lowerCAmelCase = ['foo', 'bar', 'foobar']
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case , request_timeout=30 )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([1, 1, 1] , _snake_case )
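# A compact standalone illustration of what the FAISS tests above exercise, using the
# raw faiss API directly (IndexFlatIP / add / search are real calls; the dimension and
# data here are illustrative only):
def _faiss_inner_product_demo():
    import faiss
    import numpy as np

    index = faiss.IndexFlatIP(5)  # flat inner-product index over 5-dim vectors
    index.add(np.eye(5, dtype=np.float32))  # five one-hot database vectors
    scores, indices = index.search(np.ones((1, 5), dtype=np.float32), 1)
    return scores, indices  # each one-hot vector scores 1.0 against the all-ones query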
| 4 | 1 |
'''simple docstring'''
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
"""insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , )
def test_extract_tar_with_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file, output_path)
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
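# The stricter check that the test above relies on boils down to reading the file's
# leading bytes; a minimal sketch (the real implementation lives in
# datasets.utils.extract and may check additional ZIP signatures):
def _looks_like_zip(path):
    with open(path, "rb") as f:
        return f.read(4) == b"PK\x03\x04"  # ZIP local file header magic number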
| 35 |
'''simple docstring'''
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
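    # Worked example (illustrative values): with current = 2 A and resistance = 5 ohm,
    # the missing quantity is voltage = I * R = 10 V.
    print(ohms_law(voltage=0, current=2, resistance=5))  # {'voltage': 10.0}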
| 35 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : Dict = CanineTokenizer
lowerCAmelCase_ : Optional[Any] = False
def lowercase__ ( self : Optional[Any] ):
super().setUp()
lowerCAmelCase : int = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase__ ( self : Any ):
return CanineTokenizer.from_pretrained('google/canine-s' )
def lowercase__ ( self : str , **UpperCAmelCase_ : List[str] ):
lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
lowerCAmelCase : int = 1024
return tokenizer
@require_torch
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : str = self.canine_tokenizer
lowerCAmelCase : List[Any] = ['Life is like a box of chocolates.', 'You never know what you\'re gonna get.']
# fmt: off
lowerCAmelCase : Any = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
# fmt: on
lowerCAmelCase : Optional[int] = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors='pt' )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Dict = list(batch.input_ids.numpy()[0] )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowercase__ ( self : str ):
lowerCAmelCase : Any = self.canine_tokenizer
lowerCAmelCase : Union[str, Any] = ['Once there was a man.', 'He wrote a test in HuggingFace Tranformers.']
lowerCAmelCase : Dict = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors='pt' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('input_ids' , UpperCAmelCase_ )
self.assertIn('attention_mask' , UpperCAmelCase_ )
self.assertIn('token_type_ids' , UpperCAmelCase_ )
@require_torch
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = self.canine_tokenizer
lowerCAmelCase : List[Any] = [
'What\'s the weater?',
'It\'s about 25 degrees.',
]
lowerCAmelCase : List[Any] = tokenizer(
text_target=UpperCAmelCase_ , max_length=32 , padding='max_length' , truncation=UpperCAmelCase_ , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowercase__ ( self : Dict ):
# safety check on max_len default value so we are sure the test works
lowerCAmelCase : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase : Tuple = tempfile.mkdtemp()
lowerCAmelCase : Tuple = ' He is very happy, UNwant\u00E9d,running'
lowerCAmelCase : Dict = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
tokenizer.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = tokenizer.__class__.from_pretrained(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = after_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
shutil.rmtree(UpperCAmelCase_ )
lowerCAmelCase : List[str] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase : Tuple = tempfile.mkdtemp()
lowerCAmelCase : Dict = ' He is very happy, UNwant\u00E9d,running'
lowerCAmelCase : List[Any] = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
lowerCAmelCase : List[Any] = chr(0xe_007 )
additional_special_tokens.append(UpperCAmelCase_ )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCAmelCase : Optional[Any] = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
tokenizer.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase : str = tokenizer.__class__.from_pretrained(UpperCAmelCase_ )
lowerCAmelCase : int = after_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertIn(UpperCAmelCase_ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCAmelCase : str = tokenizer.__class__.from_pretrained(UpperCAmelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCAmelCase_ )
def lowercase__ ( self : Any ):
lowerCAmelCase : str = self.get_tokenizers(do_lower_case=UpperCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCAmelCase , lowerCAmelCase : int = self.get_clean_sequence(UpperCAmelCase_ )
# a special token for Canine can be defined as follows:
lowerCAmelCase : Any = 0xe_005
lowerCAmelCase : Optional[int] = chr(UpperCAmelCase_ )
tokenizer.add_special_tokens({'cls_token': special_token} )
lowerCAmelCase : Any = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
self.assertEqual(len(UpperCAmelCase_ ) , 1 )
lowerCAmelCase : int = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=UpperCAmelCase_ )
lowerCAmelCase : Any = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase : Dict = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase : Dict = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , input_encoded + special_token_id )
lowerCAmelCase : Tuple = tokenizer.decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
self.assertTrue(special_token not in decoded )
def lowercase__ ( self : Any ):
lowerCAmelCase : Optional[int] = self.get_tokenizers(do_lower_case=UpperCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCAmelCase : List[Any] = chr(0xe_005 )
lowerCAmelCase : Tuple = chr(0xe_006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=UpperCAmelCase_ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]} )
lowerCAmelCase : Union[str, Any] = tokenizer.tokenize(UpperCAmelCase_ )
lowerCAmelCase : str = tokenizer.tokenize(UpperCAmelCase_ )
self.assertEqual(len(UpperCAmelCase_ ) , 1 )
self.assertEqual(len(UpperCAmelCase_ ) , 1 )
self.assertEqual(token_a[0] , UpperCAmelCase_ )
self.assertEqual(token_a[0] , UpperCAmelCase_ )
@require_tokenizers
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = self.get_tokenizers(do_lower_case=UpperCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# a special token for Canine can be defined as follows:
lowerCAmelCase : Dict = 0xe_006
lowerCAmelCase : Dict = chr(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ )
tokenizer.add_special_tokens({'additional_special_tokens': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(UpperCAmelCase_ )
tokenizer.from_pretrained(UpperCAmelCase_ )
def lowercase__ ( self : int ):
lowerCAmelCase : int = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCAmelCase_ )
with open(os.path.join(UpperCAmelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
lowerCAmelCase : Tuple = json.load(UpperCAmelCase_ )
with open(os.path.join(UpperCAmelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
lowerCAmelCase : Tuple = json.load(UpperCAmelCase_ )
# a special token for Canine can be defined as follows:
lowerCAmelCase : Optional[Any] = 0xe_006
lowerCAmelCase : Optional[int] = chr(UpperCAmelCase_ )
lowerCAmelCase : str = [new_token_a]
lowerCAmelCase : Any = [new_token_a]
with open(os.path.join(UpperCAmelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(os.path.join(UpperCAmelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase : str = tokenizer_class.from_pretrained(UpperCAmelCase_ , extra_ids=0 )
self.assertIn(UpperCAmelCase_ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
lowerCAmelCase : Dict = 0xe_007
lowerCAmelCase : Optional[Any] = chr(UpperCAmelCase_ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase : List[Any] = [AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ )]
lowerCAmelCase : Optional[Any] = tokenizer_class.from_pretrained(
UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , extra_ids=0 )
self.assertIn(UpperCAmelCase_ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=UpperCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCAmelCase : Dict = 'hello world'
if self.space_between_special_tokens:
lowerCAmelCase : str = '[CLS] hello world [SEP]'
else:
lowerCAmelCase : Optional[Any] = input
lowerCAmelCase : int = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase : List[Any] = tokenizer.decode(UpperCAmelCase_ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(UpperCAmelCase_ , [output, output.lower()] )
def lowercase__ ( self : Any ):
lowerCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCAmelCase : int = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
lowerCAmelCase : List[str] = 'a'
lowerCAmelCase : List[Any] = ord(UpperCAmelCase_ )
for attr in attributes_list:
setattr(UpperCAmelCase_ , attr + '_id' , UpperCAmelCase_ )
self.assertEqual(getattr(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(getattr(UpperCAmelCase_ , attr + '_id' ) , UpperCAmelCase_ )
setattr(UpperCAmelCase_ , attr + '_id' , UpperCAmelCase_ )
self.assertEqual(getattr(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(getattr(UpperCAmelCase_ , attr + '_id' ) , UpperCAmelCase_ )
setattr(UpperCAmelCase_ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(UpperCAmelCase_ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(UpperCAmelCase_ , 'additional_special_tokens_ids' ) , [] )
lowerCAmelCase : Dict = 0xe_006
lowerCAmelCase : Optional[int] = chr(UpperCAmelCase_ )
setattr(UpperCAmelCase_ , 'additional_special_tokens_ids' , [additional_special_token_id] )
self.assertListEqual(getattr(UpperCAmelCase_ , 'additional_special_tokens' ) , [additional_special_token] )
self.assertListEqual(getattr(UpperCAmelCase_ , 'additional_special_tokens_ids' ) , [additional_special_token_id] )
def lowercase__ ( self : Optional[Any] ):
pass
def lowercase__ ( self : Union[str, Any] ):
pass
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : Tuple ):
pass
def lowercase__ ( self : List[Any] ):
pass
def lowercase__ ( self : Dict ):
pass
def lowercase__ ( self : int ):
pass
def lowercase__ ( self : str ):
pass
| 343 |
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val
class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ):
while True:
lowerCAmelCase : Optional[int] = self.get_left_child_idx(UpperCAmelCase_ ) # noqa: E741
lowerCAmelCase : Union[str, Any] = self.get_right_child_idx(UpperCAmelCase_ )
lowerCAmelCase : Any = idx
if l < len(UpperCAmelCase_ ) and array[l] < array[idx]:
lowerCAmelCase : Tuple = l
if r < len(UpperCAmelCase_ ) and array[r] < array[smallest]:
lowerCAmelCase : Any = r
if smallest != idx:
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = array[smallest], array[idx]
(
(
lowerCAmelCase
) , (
lowerCAmelCase
) ,
) : List[Any] = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
lowerCAmelCase : List[str] = smallest
else:
break
def lowercase__ ( self : Any , UpperCAmelCase_ : Optional[Any] ):
lowerCAmelCase : Optional[Any] = self.get_parent_idx(UpperCAmelCase_ )
while p >= 0 and self.heap[p] > self.heap[idx]:
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.heap[idx], self.heap[p]
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
lowerCAmelCase : Dict = p
lowerCAmelCase : Optional[Any] = self.get_parent_idx(UpperCAmelCase_ )
def lowercase__ ( self : str ):
return self.heap[0]
def lowercase__ ( self : int ):
lowerCAmelCase , lowerCAmelCase : str = self.heap[-1], self.heap[0]
lowerCAmelCase , lowerCAmelCase : Dict = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
lowerCAmelCase : Any = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def lowercase__ ( self : Any , UpperCAmelCase_ : Any ):
self.heap.append(UpperCAmelCase_ )
lowerCAmelCase : str = len(self.heap ) - 1
lowerCAmelCase : List[Any] = node.val
self.sift_up(len(self.heap ) - 1 )
def lowercase__ ( self : Optional[int] ):
return len(self.heap ) == 0
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int ):
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
lowerCAmelCase : Optional[int] = new_value
lowerCAmelCase : str = new_value
self.sift_up(self.idx_of_element[node] )
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
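    # Cross-check (sketch): Python's heapq agrees with MinHeap's root after the
    # decrease-key above (values at this point: R=-1, B=-17, A=3, X=1, E=4).
    import heapq

    raw_vals = [-1, -17, 3, 1, 4]
    heapq.heapify(raw_vals)
    assert raw_vals[0] == my_min_heap.heap[0].val == -17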
| 343 | 1 |
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """simple docstring"""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """simple docstring"""
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    """simple docstring"""
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
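    # Sanity check (sketch): the backtracking output should match the standard
    # library's combinations of 1..n taken k at a time, in the same order.
    from itertools import combinations

    assert total_list == [list(c) for c in combinations(range(1, n + 1), k)]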
| 452 |
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 30,
'''pages''': '''3979-3990''',
'''year''': 2018,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
| 452 | 1 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__magic_name__ : Tuple = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class lowercase__ ( unittest.TestCase , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Dict = load_tool("""text-question-answering""" )
self.tool.setup()
UpperCamelCase : Dict = load_tool("""text-question-answering""" , remote=_A )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Tuple = self.tool(_A , """What did Hugging Face do in April 2021?""" )
self.assertEqual(_A , """launched the BigScience Research Workshop""" )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.remote_tool(_A , """What did Hugging Face do in April 2021?""" )
self.assertEqual(_A , """launched the BigScience Research Workshop""" )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : List[str] = self.tool(text=_A , question="""What did Hugging Face do in April 2021?""" )
self.assertEqual(_A , """launched the BigScience Research Workshop""" )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Tuple = self.remote_tool(text=_A , question="""What did Hugging Face do in April 2021?""" )
self.assertEqual(_A , """launched the BigScience Research Workshop""" )
| 102 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
snake_case : Optional[Any] = logging.get_logger(__name__)
snake_case : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
snake_case : List[Any] = {
"""vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
"""tokenizer_file""": {
"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
},
}
snake_case : Tuple = {"""mobilebert-uncased""": 5_1_2}
snake_case : List[str] = {}
class UpperCamelCase__ ( a_):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = MobileBertTokenizer
def __init__( self : Optional[int] , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Tuple=True , UpperCamelCase_ : Any="[UNK]" , UpperCamelCase_ : str="[SEP]" , UpperCamelCase_ : Any="[PAD]" , UpperCamelCase_ : Dict="[CLS]" , UpperCamelCase_ : str="[MASK]" , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Dict=None , **UpperCamelCase_ : Dict , ):
'''simple docstring'''
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , )
__magic_name__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase_ ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase_ ) != tokenize_chinese_chars
):
__magic_name__ = getattr(UpperCamelCase_ , normalizer_state.pop('type' ) )
__magic_name__ = do_lower_case
__magic_name__ = strip_accents
__magic_name__ = tokenize_chinese_chars
__magic_name__ = normalizer_class(**UpperCamelCase_ )
__magic_name__ = do_lower_case
def a__ ( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any]=None ):
'''simple docstring'''
__magic_name__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a__ ( self : List[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
'''simple docstring'''
__magic_name__ = [self.sep_token_id]
__magic_name__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
'''simple docstring'''
__magic_name__ = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
        return tuple(UpperCamelCase_ )
| 545 | 0 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__magic_name__ = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
__magic_name__ = parser.parse_args()
__magic_name__ = '''cpu'''
__magic_name__ = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
__magic_name__ = '''path-to-your-trained-model'''
__magic_name__ = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__magic_name__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__magic_name__ = pipe.to(device)
# to channels last
__magic_name__ = pipe.unet.to(memory_format=torch.channels_last)
__magic_name__ = pipe.vae.to(memory_format=torch.channels_last)
__magic_name__ = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__magic_name__ = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__magic_name__ = torch.randn(2, 4, 64, 64)
__magic_name__ = torch.rand(1) * 999
__magic_name__ = torch.randn(2, 77, 768)
__magic_name__ = (sample, timestep, encoder_hidden_status)
try:
__magic_name__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
__magic_name__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__magic_name__ = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__magic_name__ = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
__magic_name__ = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
__magic_name__ = 666
__magic_name__ = torch.Generator(device).manual_seed(seed)
__magic_name__ = {'''generator''': generator}
if args.steps is not None:
__magic_name__ = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
__magic_name__ = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
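# Optional sanity check (sketch): confirm the UNet weights really are in
# channels_last memory format after the conversions above. The attribute path
# (unet.conv_in) matches diffusers' UNet2DConditionModel but is an assumption here.
assert pipe.unet.conv_in.weight.is_contiguous(memory_format=torch.channels_last)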
| 721 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class a__ ( _snake_case ):
"""simple docstring"""
def __init__( self :Union[str, Any] , lowercase__ :NestedDataStructureLike[PathLike] , lowercase__ :Optional[NamedSplit] = None , lowercase__ :Optional[Features] = None , lowercase__ :str = None , lowercase__ :bool = False , lowercase__ :bool = False , lowercase__ :Optional[int] = None , **lowercase__ :Any , ):
super().__init__(
lowercase__ , split=lowercase__ , features=lowercase__ , cache_dir=lowercase__ , keep_in_memory=lowercase__ , streaming=lowercase__ , num_proc=lowercase__ , **lowercase__ , )
lowercase = path_or_paths if isinstance(lowercase__ , lowercase__ ) else {self.split: path_or_paths}
lowercase = Text(
cache_dir=lowercase__ , data_files=lowercase__ , features=lowercase__ , **lowercase__ , )
def __UpperCAmelCase ( self :Any ):
# Build iterable dataset
if self.streaming:
lowercase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase = None
lowercase = None
lowercase = None
lowercase = None
self.builder.download_and_prepare(
download_config=lowercase__ , download_mode=lowercase__ , verification_mode=lowercase__ , base_path=lowercase__ , num_proc=self.num_proc , )
lowercase = self.builder.as_dataset(
split=self.split , verification_mode=lowercase__ , in_memory=self.keep_in_memory )
return dataset
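# In practice this reader is reached through the public load_dataset API rather than
# instantiated directly; a minimal equivalent usage sketch (the file path below is
# illustrative, not from the original source):
def _text_reader_demo():
    from datasets import load_dataset

    return load_dataset("text", data_files={"train": "my_corpus.txt"})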
| 314 | 0 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
_snake_case : List[Any] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ =["""audio_values""", """audio_mask"""]
def __init__( self, _a=20_48, _a=1, _a=[16, 16], _a=1_28, _a=4_41_00, _a=86, _a=20_48, _a=0.0, **_a, ) -> str:
super().__init__(
feature_size=_a, sampling_rate=_a, padding_value=_a, **_a, )
__SCREAMING_SNAKE_CASE = spectrogram_length
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = feature_size // self.patch_size[1]
__SCREAMING_SNAKE_CASE = n_fft
__SCREAMING_SNAKE_CASE = sampling_rate // hop_length_to_sampling_rate
__SCREAMING_SNAKE_CASE = sampling_rate
__SCREAMING_SNAKE_CASE = padding_value
__SCREAMING_SNAKE_CASE = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2, num_mel_filters=_a, min_frequency=0.0, max_frequency=2_2050.0, sampling_rate=_a, norm="slaney", mel_scale="slaney", ).T
def __lowerCAmelCase ( self, _a ) -> np.ndarray:
__SCREAMING_SNAKE_CASE = spectrogram(
_a, window_function(self.n_fft, "hann" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, )
__SCREAMING_SNAKE_CASE = log_spec[:, :-1]
__SCREAMING_SNAKE_CASE = log_spec - 20.0
__SCREAMING_SNAKE_CASE = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
return log_spec
def __call__( self, _a, _a = None, _a = True, _a = None, _a = False, _a = False, **_a, ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
f''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
__SCREAMING_SNAKE_CASE = isinstance(_a, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__SCREAMING_SNAKE_CASE = is_batched_numpy or (
isinstance(_a, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_a, np.ndarray ):
__SCREAMING_SNAKE_CASE = np.asarray(_a, dtype=np.floataa )
elif isinstance(_a, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__SCREAMING_SNAKE_CASE = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__SCREAMING_SNAKE_CASE = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0], _a ):
__SCREAMING_SNAKE_CASE = [np.asarray(_a, dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__SCREAMING_SNAKE_CASE = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__SCREAMING_SNAKE_CASE = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__SCREAMING_SNAKE_CASE = np.array(_a ).astype(np.floataa )
# convert into correct format for padding
__SCREAMING_SNAKE_CASE = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__SCREAMING_SNAKE_CASE = np.ones([len(_a ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__SCREAMING_SNAKE_CASE = padded_audio_features * self.padding_value
for i in range(len(_a ) ):
__SCREAMING_SNAKE_CASE = audio_features[i]
__SCREAMING_SNAKE_CASE = feature
# return as BatchFeature
if return_attention_mask:
__SCREAMING_SNAKE_CASE = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
__SCREAMING_SNAKE_CASE = {"audio_values": padded_audio_features}
__SCREAMING_SNAKE_CASE = BatchFeature(data=_a, tensor_type=_a )
return encoded_inputs
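# Minimal usage sketch for the extractor above. Its class name is obfuscated in this
# dump (upstream it is the TVLT feature extractor), so the snippet is kept as a
# commented sketch; the class name and values below are assumptions:
#
#   import numpy as np
#
#   extractor = TvltFeatureExtractor()            # assumed upstream class name
#   audio = np.zeros(44_100, dtype=np.float32)    # one second of silence at 44.1 kHz
#   features = extractor(audio, sampling_rate=44_100, return_tensors="np")
#   print(features["audio_values"].shape, features["audio_mask"].shape)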
| 693 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """simple docstring"""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """simple docstring"""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """simple docstring"""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """simple docstring"""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
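    # Worked examples (illustrative values): 1 mol at 300 K in a 1 L vessel gives
    # round(1 * 0.0821 * 300 / 1) = 25 atm, and 4 mol in 2 L with an n-factor of 2
    # gives a normality of round(4 / 2 * 2) = 4.
    print(moles_to_pressure(volume=1, moles=1, temperature=300))  # 25
    print(molarity_to_normality(nfactor=2, moles=4, volume=2))  # 4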
| 693 | 1 |
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'{len(upper_files)} files contain uppercase characters:')
print('\n'.join(upper_files) + '\n')
space_files = [file for file in filepaths if ' ' in file]
if space_files:
print(F'{len(space_files)} files contain space characters:')
print('\n'.join(space_files) + '\n')
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F'{len(hyphen_files)} files contain hyphen characters:')
print('\n'.join(hyphen_files) + '\n')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'{len(nodir_files)} files are not in a directory:')
print('\n'.join(nodir_files) + '\n')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 348 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase (__snake_case ):
_SCREAMING_SNAKE_CASE : Optional[int] = (PNDMScheduler,)
_SCREAMING_SNAKE_CASE : Optional[int] = (("""num_inference_steps""", 50),)
def __snake_case ( self :int , **__magic_name__ :List[str] ) ->str:
lowercase : Dict = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**__magic_name__ )
return config
def __snake_case ( self :Any , __magic_name__ :Any=0 , **__magic_name__ :List[Any] ) ->Optional[int]:
lowercase : List[Any] = dict(self.forward_default_kwargs )
lowercase : str = kwargs.pop("""num_inference_steps""" , __magic_name__ )
lowercase : Any = self.dummy_sample
lowercase : Optional[Any] = 0.1 * sample
lowercase : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase : int = self.get_scheduler_config(**__magic_name__ )
lowercase : int = scheduler_class(**__magic_name__ )
scheduler.set_timesteps(__magic_name__ )
# copy over dummy past residuals
lowercase : str = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__magic_name__ )
lowercase : Union[str, Any] = scheduler_class.from_pretrained(__magic_name__ )
new_scheduler.set_timesteps(__magic_name__ )
# copy over dummy past residuals
lowercase : Dict = dummy_past_residuals[:]
lowercase : Optional[int] = scheduler.step_prk(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
lowercase : Dict = new_scheduler.step_prk(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase : Union[str, Any] = scheduler.step_plms(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
lowercase : int = new_scheduler.step_plms(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __snake_case ( self :Optional[Any] ) ->Tuple:
pass
def __snake_case ( self :List[str] , __magic_name__ :str=0 , **__magic_name__ :Tuple ) ->Any:
lowercase : List[str] = dict(self.forward_default_kwargs )
lowercase : Optional[Any] = kwargs.pop("""num_inference_steps""" , __magic_name__ )
lowercase : Any = self.dummy_sample
lowercase : Optional[Any] = 0.1 * sample
lowercase : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase : List[str] = self.get_scheduler_config()
lowercase : Optional[Any] = scheduler_class(**__magic_name__ )
scheduler.set_timesteps(__magic_name__ )
# copy over dummy past residuals (must be after setting timesteps)
lowercase : Optional[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__magic_name__ )
lowercase : Dict = scheduler_class.from_pretrained(__magic_name__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(__magic_name__ )
# copy over dummy past residual (must be after setting timesteps)
lowercase : int = dummy_past_residuals[:]
lowercase : List[str] = scheduler.step_prk(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
lowercase : Any = new_scheduler.step_prk(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase : Any = scheduler.step_plms(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
lowercase : int = new_scheduler.step_plms(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __snake_case ( self :List[Any] , **__magic_name__ :Optional[int] ) ->int:
lowercase : Any = self.scheduler_classes[0]
lowercase : Optional[int] = self.get_scheduler_config(**__magic_name__ )
lowercase : List[Any] = scheduler_class(**__magic_name__ )
lowercase : int = 10
lowercase : Union[str, Any] = self.dummy_model()
lowercase : Dict = self.dummy_sample_deter
scheduler.set_timesteps(__magic_name__ )
for i, t in enumerate(scheduler.prk_timesteps ):
lowercase : Any = model(__magic_name__ , __magic_name__ )
lowercase : Union[str, Any] = scheduler.step_prk(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
lowercase : str = model(__magic_name__ , __magic_name__ )
lowercase : int = scheduler.step_plms(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample
return sample
def __snake_case ( self :List[Any] ) ->List[Any]:
lowercase : str = dict(self.forward_default_kwargs )
lowercase : Dict = kwargs.pop("""num_inference_steps""" , __magic_name__ )
for scheduler_class in self.scheduler_classes:
lowercase : str = self.get_scheduler_config()
lowercase : str = scheduler_class(**__magic_name__ )
lowercase : List[Any] = self.dummy_sample
lowercase : Dict = 0.1 * sample
if num_inference_steps is not None and hasattr(__magic_name__ , """set_timesteps""" ):
scheduler.set_timesteps(__magic_name__ )
elif num_inference_steps is not None and not hasattr(__magic_name__ , """set_timesteps""" ):
lowercase : str = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowercase : Union[str, Any] = dummy_past_residuals[:]
lowercase : Any = scheduler.step_prk(__magic_name__ , 0 , __magic_name__ , **__magic_name__ ).prev_sample
lowercase : Union[str, Any] = scheduler.step_prk(__magic_name__ , 1 , __magic_name__ , **__magic_name__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
lowercase : List[Any] = scheduler.step_plms(__magic_name__ , 0 , __magic_name__ , **__magic_name__ ).prev_sample
lowercase : Optional[int] = scheduler.step_plms(__magic_name__ , 1 , __magic_name__ , **__magic_name__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __snake_case ( self :Optional[Any] ) ->Optional[Any]:
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=__magic_name__ )
def __snake_case ( self :int ) ->Tuple:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__magic_name__ )
lowercase : Optional[int] = self.scheduler_classes[0]
lowercase : int = self.get_scheduler_config(steps_offset=1 )
lowercase : str = scheduler_class(**__magic_name__ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def __snake_case ( self :int ) ->int:
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=__magic_name__ , beta_end=__magic_name__ )
def __snake_case ( self :Tuple ) ->Union[str, Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__magic_name__ )
def __snake_case ( self :Optional[int] ) ->Tuple:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__magic_name__ )
def __snake_case ( self :Optional[int] ) ->Any:
for t in [1, 5, 10]:
self.check_over_forward(time_step=__magic_name__ )
def __snake_case ( self :List[Any] ) ->Any:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=__magic_name__ )
    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample
    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
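# Illustrative sketch (not part of the test file above; assumes the `diffusers` package
# and any noise-prediction callable `model`): PNDM runs a few Runge-Kutta (PRK) warm-up
# steps and then switches to linear multistep (PLMS) updates, which is why `step_prk`
# and `step_plms` are exercised separately in the tests above.
#
#   scheduler = PNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.prk_timesteps:       # Runge-Kutta warm-up phase
#       residual = model(sample, t)
#       sample = scheduler.step_prk(residual, t, sample).prev_sample
#   for t in scheduler.plms_timesteps:      # linear multistep phase
#       residual = model(sample, t)
#       sample = scheduler.step_plms(residual, t, sample).prev_sample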
| 348 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 94 |
'''simple docstring'''
def xnor_gate(input_1: int, input_2: int) -> int:
    """Return 1 if both inputs are equal (logical XNOR), else 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 94 | 1 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    """Compute a (height, width) close to `output_size` with each side a multiple of `multiple`."""

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
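# Worked example (illustrative, not part of the original module): for a 480x640 image
# resized toward 384x384 with keep_aspect_ratio=True and multiple=32, the height scale
# (0.8) deviates less from 1 than the width scale (0.6), so both sides are scaled by 0.8
# and rounded to multiples of 32:
#   get_resize_output_image_size(image, (384, 384), keep_aspect_ratio=True, multiple=32)
#   # -> (384, 512)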
class DPTImageProcessor(BaseImageProcessor):
    """Image processor with DPT-style resizing: aspect-ratio aware and multiple-of constrained."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: int = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
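# Illustrative usage sketch (assumes torch is installed and that, as with other Hugging
# Face image processors, calling the instance dispatches to `preprocess`):
#
#   import numpy as np
#   processor = DPTImageProcessor(size={"height": 384, "width": 384})
#   image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#   inputs = processor(images=image, return_tensors="pt")
#   print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 384, 384])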
| 62 |
def topological_sort(graph):
    """Kahn's algorithm: print a topological order of `graph`, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
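# Extra illustrative check (not in the original snippet): with a cycle, Kahn's algorithm
# never reduces every indegree to zero, the processed count falls short of len(graph),
# and the "Cycle exists" branch fires.
#   topological_sort({0: [1], 1: [2], 2: [0]})  # prints: Cycle exists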
| 62 | 1 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=False, initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args, **kwargs):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 45 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
"tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 599 | 0 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet's tokens, or None if it is too short."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code string on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
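# Illustrative sketch (not part of the original script): MinHash signatures approximate
# Jaccard similarity without comparing full token sets. Assumes two code strings `code_a`
# and `code_b` that each tokenize to at least MIN_NUM_TOKENS tokens; otherwise
# get_min_hash returns None.
#   tokens_a = [t for t in NON_ALPHA.split(code_a) if t.strip()]
#   tokens_b = [t for t in NON_ALPHA.split(code_b) if t.strip()]
#   mh_a, mh_b = get_min_hash(tokens_a), get_min_hash(tokens_b)
#   if mh_a is not None and mh_b is not None:
#       print(mh_a.jaccard(mh_b))  # estimated Jaccard similarity in [0, 1]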
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"""Duplicate key {code_key}""")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10000), chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the dataset via MinHash signatures and an LSH index."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Exact Jaccard similarity between the token sets of two code strings."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative ("extreme") per group of near-duplicates inside a cluster."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list), total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"""Original dataset size: {len(dataset)}""")
    print(f"""Number of duplicate clusters: {len(duplicate_clusters)}""")
    print(f"""Files in duplicate cluster: {len(duplicate_indices)}""")
    print(f"""Unique files in duplicate cluster: {len(extreme_dict)}""")
    print(f"""Filtered dataset size: {len(ds_filter)}""")
    return ds_filter, duplicate_clusters
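# Minimal end-to-end sketch (illustrative; assumes the `datasets` package and source
# strings long enough to pass the MIN_NUM_TOKENS filter):
#   from datasets import Dataset
#   ds = Dataset.from_dict({
#       "content": [code_a, code_b, code_c],            # raw source strings
#       "repo_name": ["repo_a", "repo_b", "repo_c"],
#       "path": ["a.py", "b.py", "c.py"],
#   })
#   ds_filter, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)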
| 279 |
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))  # 11 (via 1 -> 3 -> 4)
    print(graph.show_min(0, 3))  # 16 (via 0 -> 2 -> 3)
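# Note (illustrative): floyd_warshall() costs O(n^3) time and O(n^2) memory, but once it
# has run, every dp[u][v] already holds the cheapest path cost, so queries are O(1).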
| 279 | 1 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/diffusers')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state='closed')
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state='open')
            issue.remove_from_labels('stale')
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.')
            issue.add_to_labels('stale')
if __name__ == "__main__":
main()
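# Timeline sketch (illustrative summary of the branches above): an issue that is at
# least 30 days old and quiet for more than 23 days gets a stale comment plus the
# "stale" label; if it then stays quiet for 7 more days after the bot's comment it is
# closed; any non-bot comment in between reopens it and removes the label.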
| 539 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir')
    if "full:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('---\ndataset_info:\n  dataset_size: 42\n---')
    if "empty:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('')
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / 'dataset_infos.json', 'w') as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, 'dataset_info.json'))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description='foo',
        citation='bar',
        homepage='https://foo.bar',
        license='CC0',
        features=Features({'a': Value('int32')}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name='builder',
        config_name='config',
        version='1.0.0',
        splits=[{'name': 'train', 'num_examples': 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, 'README.md'))
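# Quick round-trip sketch (illustrative, mirroring the tests above):
#   info = DatasetInfo(description="demo", dataset_size=42)
#   info.write_to_directory("/tmp/demo_info")
#   assert DatasetInfo.from_directory("/tmp/demo_info").dataset_size == 42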
| 539 | 1 |
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_02_17_66_34e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_58_18,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert `value` between the energy units listed in ENERGY_CONVERSION."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            f"""Valid values are: {", ".join(ENERGY_CONVERSION)}"""
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
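# Illustrative conversions (not in the original module):
#   energy_conversion("joule", "kilojoule", 1_000)   # -> 1.0
#   energy_conversion("kilowatthour", "joule", 1)    # -> 3_600_000.0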
if __name__ == "__main__":
import doctest
doctest.testmod()
| 548 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("""Diffusers CLI tool""", usage="""diffusers-cli <command> [<args>]""")
    commands_parser = parser.add_subparsers(help="""diffusers-cli command helpers""")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, """func"""):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
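# Typical invocation once installed as the `diffusers-cli` entry point (illustrative):
#   $ diffusers-cli env
# which dispatches to EnvironmentCommand and prints environment/debug information.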
| 548 | 1 |
"""simple docstring"""
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """Strand sort: repeatedly pull ordered runs out of `arr` and merge them into `solution`."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
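# How it works (illustrative summary): each pass pulls a "strand" (a maximal ordered run)
# out of the remaining input and merges it into `solution`, recursing until the input is
# empty; in the worst case this costs O(n^2) comparisons.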
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 156 |
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"""{token} {vocab_tokens[token]}\n""")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 156 | 1 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit']):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line('markers', 'torchaudio_latest: mark test to run with torchaudio>=0.12')


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE', str(test_hf_datasets_cache))
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE', str(test_hf_metrics_cache))
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE', str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH', str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH', str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope='session')
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS', False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING', True)
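# Note (illustrative): the autouse `set_test_cache_config` fixture above redirects every
# datasets cache path into a per-session temporary directory, so the test suite never
# reads or writes the user's real ~/.cache/huggingface data.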
| 703 |
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list:
    """Sieve of Eratosthenes: return all primes below `max_number`."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count semiprimes p*q < max_number with primes p <= q, via a two-pointer sweep."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
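# Worked micro-example (illustrative): with max_number=30 the sieve yields the primes
# below 15, [2, 3, 5, 7, 11, 13], and the two-pointer sweep counts the 10 semiprimes
# 4, 6, 9, 10, 14, 15, 21, 22, 25, 26, i.e. solution(30) == 10.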
| 433 | 0 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 235 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split('.')[0].split(lora_prefix_text_encoder + '_')[-1].split('_')
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split('.')[0].split(lora_prefix_unet + '_')[-1].split('_')
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace('lora_down', 'lora_up'))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace('lora_up', 'lora_down'))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
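# Why the "update weight" step works (illustrative): a LoRA checkpoint stores two
# low-rank factors per layer, `up` (d_out x r) and `down` (r x d_in); merging applies
# W <- W + alpha * (up @ down), which is exactly what the torch.mm calls above compute.
# A tiny shape check (assumes torch):
#   up, down = torch.randn(8, 4), torch.randn(4, 16)   # rank-4 factors
#   delta = torch.mm(up, down)                         # full 8x16 update
#   assert delta.shape == (8, 16)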
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)

    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 235 | 1 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = './model_checkpoints/vqgan_only.yaml'
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = './model_checkpoints/vqgan_only.pt'
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd['state_dict']
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, emb_loss, info = model.encode(x)
    print(f'VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}')
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit('.', 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError('Expected key `target` to instantiate.')
    return get_obj_from_str(config['target'])(**config.get('params', {}))
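# Illustrative sketch (not in the original utility): `instantiate_from_config` resolves a
# dotted path with importlib and calls it with the YAML params, so a node like
#   target: taming.models.vqgan.VQModel
#   params: {embed_dim: 256, n_embed: 1024}
# becomes VQModel(embed_dim=256, n_embed=1024). The param names here are assumptions
# chosen for illustration.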
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location='cpu')
        global_step = pl_sd['global_step']
        print(f'loaded model from global step {global_step}.')
    else:
        pl_sd = {'state_dict': None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd['state_dict'], gpu=gpu, eval_mode=eval_mode)['model']
    return model, global_step
| 713 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def snake_case__ ( __lowerCamelCase : Any ):
"""simple docstring"""
if "encoder." in name:
lowerCamelCase__ : Optional[int] =name.replace('''encoder.''' , '''''' )
if "cls_token" in name:
lowerCamelCase__ : List[Any] =name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' )
if "decoder_pos_embed" in name:
lowerCamelCase__ : Tuple =name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
lowerCamelCase__ : Any =name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
lowerCamelCase__ : Optional[Any] =name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCamelCase__ : List[Any] =name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' )
if "decoder.blocks" in name:
lowerCamelCase__ : Tuple =name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' )
if "blocks" in name:
lowerCamelCase__ : Dict =name.replace('''blocks''' , '''videomae.encoder.layer''' )
if "attn.proj" in name:
lowerCamelCase__ : Union[str, Any] =name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "bias" not in name:
lowerCamelCase__ : List[str] =name.replace('''attn''' , '''attention.self''' )
if "attn" in name:
lowerCamelCase__ : Union[str, Any] =name.replace('''attn''' , '''attention.attention''' )
if "norm1" in name:
lowerCamelCase__ : Tuple =name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowerCamelCase__ : Optional[int] =name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowerCamelCase__ : List[Any] =name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowerCamelCase__ : int =name.replace('''mlp.fc2''' , '''output.dense''' )
if "decoder_embed" in name:
lowerCamelCase__ : Any =name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
if "decoder_norm" in name:
lowerCamelCase__ : Optional[Any] =name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
if "decoder_pred" in name:
lowerCamelCase__ : Any =name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
lowerCamelCase__ : str =name.replace('''norm.weight''' , '''videomae.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
lowerCamelCase__ : Optional[int] =name.replace('''norm.bias''' , '''videomae.layernorm.bias''' )
if "head" in name and "decoder" not in name:
lowerCamelCase__ : List[str] =name.replace('''head''' , '''classifier''' )
return name
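# Minimal sketch of the substring-renaming idea implemented above. The rule table
# here is a small illustrative subset (an assumption, not the script's full map),
# applied in order just like the chain of `name.replace(...)` branches.
_RENAME_RULES = [
    ("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection"),
    ("blocks", "videomae.encoder.layer"),
    ("mlp.fc1", "intermediate.dense"),
]

def _rename_demo(key):
    for old, new in _RENAME_RULES:
        if old in key:
            key = key.replace(old, new)
    return key

assert _rename_demo("blocks.0.mlp.fc1.weight") == "videomae.encoder.layer.0.intermediate.dense.weight"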
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
        lowerCamelCase__ : Dict =orig_state_dict.pop(key )
if key.startswith('''encoder.''' ):
lowerCamelCase__ : Optional[int] =key.replace('''encoder.''' , '''''' )
if "qkv" in key:
lowerCamelCase__ : Any =key.split('''.''' )
if key.startswith('''decoder.blocks''' ):
lowerCamelCase__ : Tuple =config.decoder_hidden_size
lowerCamelCase__ : str =int(key_split[2] )
lowerCamelCase__ : Any ='''decoder.decoder_layers.'''
if "weight" in key:
lowerCamelCase__ : List[Any] =val[:dim, :]
lowerCamelCase__ : Any =val[dim : dim * 2, :]
lowerCamelCase__ : Dict =val[-dim:, :]
else:
lowerCamelCase__ : Optional[Any] =config.hidden_size
lowerCamelCase__ : Optional[Any] =int(key_split[1] )
lowerCamelCase__ : str ='''videomae.encoder.layer.'''
if "weight" in key:
lowerCamelCase__ : int =val[:dim, :]
lowerCamelCase__ : Tuple =val[dim : dim * 2, :]
lowerCamelCase__ : List[Any] =val[-dim:, :]
else:
lowerCamelCase__ : int =val
return orig_state_dict
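# Sketch of the fused-QKV split performed above: the raw checkpoint stores the
# query/key/value projections as one (3*dim, dim) matrix, and the converter
# slices it into thirds. The toy dimension below is an assumption for illustration.
import torch as _torch

_dim = 4
_qkv = _torch.randn(3 * _dim, _dim)
_q, _k, _v = _qkv[:_dim, :], _qkv[_dim : _dim * 2, :], _qkv[-_dim:, :]
assert _torch.equal(_torch.cat([_q, _k, _v], dim=0), _qkv)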
def snake_case__ ( ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
lowerCamelCase__ : Optional[Any] =np.load(__lowerCamelCase )
return list(__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : str =get_videomae_config(__lowerCamelCase )
if "finetuned" in model_name:
lowerCamelCase__ : Tuple =VideoMAEForVideoClassification(__lowerCamelCase )
else:
lowerCamelCase__ : int =VideoMAEForPreTraining(__lowerCamelCase )
# download original checkpoint, hosted on Google Drive
lowerCamelCase__ : Union[str, Any] ='''pytorch_model.bin'''
gdown.cached_download(__lowerCamelCase , __lowerCamelCase , quiet=__lowerCamelCase )
lowerCamelCase__ : Optional[Any] =torch.load(__lowerCamelCase , map_location='''cpu''' )
if "model" in files:
lowerCamelCase__ : Dict =files['''model''']
else:
lowerCamelCase__ : str =files['''module''']
lowerCamelCase__ : Optional[Any] =convert_state_dict(__lowerCamelCase , __lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
model.eval()
# verify model on basic input
lowerCamelCase__ : Dict =VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
lowerCamelCase__ : int =prepare_video()
lowerCamelCase__ : Tuple =image_processor(__lowerCamelCase , return_tensors='''pt''' )
if "finetuned" not in model_name:
lowerCamelCase__ : Tuple =hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
lowerCamelCase__ : Union[str, Any] =torch.load(__lowerCamelCase )
lowerCamelCase__ : int =model(**__lowerCamelCase )
lowerCamelCase__ : Dict =outputs.logits
lowerCamelCase__ : List[str] =[
'''videomae-small-finetuned-kinetics''',
'''videomae-small-finetuned-ssv2''',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'''videomae-base-short''',
'''videomae-base-short-finetuned-kinetics''',
'''videomae-base''',
'''videomae-base-finetuned-kinetics''',
'''videomae-large''',
'''videomae-large-finetuned-kinetics''',
'''videomae-huge-finetuned-kinetics''',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'''videomae-base-short-ssv2''',
'''videomae-base-short-finetuned-ssv2''',
'''videomae-base-ssv2''',
'''videomae-base-finetuned-ssv2''',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
lowerCamelCase__ : Union[str, Any] =torch.Size([1, 400] )
lowerCamelCase__ : str =torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
elif model_name == "videomae-small-finetuned-ssv2":
lowerCamelCase__ : int =torch.Size([1, 174] )
lowerCamelCase__ : Dict =torch.tensor([0.26_71, -0.46_89, -0.82_35] )
elif model_name == "videomae-base":
lowerCamelCase__ : List[str] =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : Dict =torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
elif model_name == "videomae-base-short":
lowerCamelCase__ : List[Any] =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : List[str] =torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
# we verified the loss both for normalized and unnormalized targets for this one
lowerCamelCase__ : str =torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
elif model_name == "videomae-large":
lowerCamelCase__ : Union[str, Any] =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : List[Any] =torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
elif model_name == "videomae-large-finetuned-kinetics":
lowerCamelCase__ : Any =torch.Size([1, 400] )
lowerCamelCase__ : str =torch.tensor([0.07_71, 0.00_11, -0.36_25] )
elif model_name == "videomae-huge-finetuned-kinetics":
lowerCamelCase__ : Any =torch.Size([1, 400] )
lowerCamelCase__ : Optional[int] =torch.tensor([0.24_33, 0.16_32, -0.48_94] )
elif model_name == "videomae-base-short-finetuned-kinetics":
lowerCamelCase__ : List[str] =torch.Size([1, 400] )
lowerCamelCase__ : Dict =torch.tensor([0.65_88, 0.09_90, -0.24_93] )
elif model_name == "videomae-base-finetuned-kinetics":
lowerCamelCase__ : str =torch.Size([1, 400] )
lowerCamelCase__ : Any =torch.tensor([0.36_69, -0.06_88, -0.24_21] )
elif model_name == "videomae-base-short-ssv2":
lowerCamelCase__ : Tuple =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : Dict =torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
lowerCamelCase__ : Optional[int] =torch.Size([1, 174] )
lowerCamelCase__ : Any =torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
elif model_name == "videomae-base-ssv2":
lowerCamelCase__ : Dict =torch.Size([1, 1408, 1536] )
lowerCamelCase__ : str =torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
elif model_name == "videomae-base-finetuned-ssv2":
lowerCamelCase__ : str =torch.Size([1, 174] )
lowerCamelCase__ : int =torch.tensor([0.19_61, -0.83_37, -0.63_89] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1e-4 )
else:
print('''Logits:''' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 )
print('''Logits ok!''' )
# verify loss, if applicable
if model_name == "videomae-base-short":
lowerCamelCase__ : str =outputs.loss
assert torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-4 )
print('''Loss ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
if push_to_hub:
print('''Pushing to the hub...''' )
model.push_to_hub(__lowerCamelCase , organization='''nielsr''' )
if __name__ == "__main__":
    _lowercase : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    _lowercase : Union[str, Any] = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 625 | 0 |
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE : Union[str, Any] = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
SCREAMING_SNAKE_CASE : List[Any] = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
SCREAMING_SNAKE_CASE : Optional[Any] = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
def _UpperCAmelCase ( self : Union[str, Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
def _UpperCAmelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str]=False ):
if return_pvalue:
            results = pearsonr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )[0] )}
| 197 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
SCREAMING_SNAKE_CASE : Union[str, Any] = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close( source , target ):
"""simple docstring"""
return (abs(source - target ) / target) < 0.01
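# Worked example of the 1% relative-closeness check above. 235_1563 is the
# expected num_bytes used later in this test; the drifted values are hypothetical.
assert (abs(235_1999 - 235_1563) / 235_1563) < 0.01  # a 436-byte drift on ~2.35 MB passes
assert not (abs(240_0000 - 235_1563) / 235_1563) < 0.01  # a ~2% drift fails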
@pytest.mark.integration
def __A ( _A ):
"""simple docstring"""
__a = _TestCommandArgs(dataset=_A , all_configs=_A , save_infos=_A )
__a = TestCommand(*_A )
test_command.run()
__a = os.path.join(_A , "README.md" )
assert os.path.exists(_A )
__a = DatasetInfosDict.from_directory(_A )
__a = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 235_1563,
"num_examples": 1_0000,
},
{
"name": "validation",
"num_bytes": 23_8418,
"num_examples": 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result , expected = getattr(dataset_infos["default"] , key ), getattr(expected_dataset_infos["default"] , key )
        if key == "num_bytes":
            assert is_apercent_close(result , expected )
        elif key == "splits":
            assert list(result ) == list(expected )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
        else:
            assert result == expected
| 197 | 1 |
'''simple docstring'''
from typing import Any
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = data
UpperCAmelCase_ : Optional[Any] = None
def __repr__( self ):
'''simple docstring'''
return F'''Node({self.data})'''
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = None
def __iter__( self ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = self.head
while node:
yield node.data
UpperCAmelCase_ : List[Any] = node.next
def __len__( self ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self ):
'''simple docstring'''
return "->".join([str(snake_case_ ) for item in self] )
def __getitem__( self , snake_case_ ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , snake_case_ , snake_case_ ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
UpperCAmelCase_ : Any = self.head
for _ in range(snake_case_ ):
UpperCAmelCase_ : Optional[int] = current.next
UpperCAmelCase_ : Optional[int] = data
def _UpperCamelCase ( self , snake_case_ ):
'''simple docstring'''
self.insert_nth(len(self ) , snake_case_ )
def _UpperCamelCase ( self , snake_case_ ):
'''simple docstring'''
self.insert_nth(0 , snake_case_ )
def _UpperCamelCase ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
UpperCAmelCase_ : Any = Node(snake_case_ )
if self.head is None:
UpperCAmelCase_ : Optional[Any] = new_node
elif index == 0:
UpperCAmelCase_ : Dict = self.head # link new_node to head
UpperCAmelCase_ : Tuple = new_node
else:
UpperCAmelCase_ : str = self.head
for _ in range(index - 1 ):
UpperCAmelCase_ : int = temp.next
UpperCAmelCase_ : Union[str, Any] = temp.next
UpperCAmelCase_ : int = new_node
def _UpperCamelCase ( self ): # print every node data
'''simple docstring'''
print(self )
def _UpperCamelCase ( self ):
'''simple docstring'''
return self.delete_nth(0 )
def _UpperCamelCase ( self ): # delete from tail
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def _UpperCamelCase ( self , snake_case_ = 0 ):
'''simple docstring'''
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
UpperCAmelCase_ : List[str] = self.head # default first node
if index == 0:
UpperCAmelCase_ : List[str] = self.head.next
else:
UpperCAmelCase_ : Optional[Any] = self.head
for _ in range(index - 1 ):
UpperCAmelCase_ : List[Any] = temp.next
UpperCAmelCase_ : Optional[int] = temp.next
UpperCAmelCase_ : Optional[int] = temp.next.next
return delete_node.data
def _UpperCamelCase ( self ):
'''simple docstring'''
return self.head is None
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Optional[Any] = self.head
while current:
# Store the current node's next node.
UpperCAmelCase_ : int = current.next
# Make the current node's next point backwards
UpperCAmelCase_ : Dict = prev
# Make the previous node be the current node
UpperCAmelCase_ : Union[str, Any] = current
# Make the current node the next node (to progress iteration)
UpperCAmelCase_ : List[Any] = next_node
# Return prev in order to put the head at the end
UpperCAmelCase_ : str = prev
def _lowerCamelCase ( ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = LinkedList()
assert linked_list.is_empty() is True
assert str(lowerCamelCase_ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(lowerCamelCase_ ) == i
        linked_list.insert_nth(i , i + 1 )
assert str(lowerCamelCase_ ) == "->".join(str(lowerCamelCase_ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(lowerCamelCase_ ) == "->".join(str(lowerCamelCase_ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(lowerCamelCase_ ) == 9
assert str(lowerCamelCase_ ) == "->".join(str(lowerCamelCase_ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
UpperCAmelCase_ : List[Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(lowerCamelCase_ ) == "->".join(str(lowerCamelCase_ ) for i in range(-8 , 1 ) )
def _lowerCamelCase ( ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = [
-9,
100,
Node(77345112 ),
'dlrow olleH',
7,
5555,
0,
-192.5_5555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
UpperCAmelCase_ : List[str] = LinkedList()
for i in test_input:
        linked_list.insert_tail(i )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(lowerCamelCase_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
UpperCAmelCase_ : Optional[int] = linked_list.delete_head()
assert result == -9
assert (
str(lowerCamelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
UpperCAmelCase_ : Union[str, Any] = linked_list.delete_tail()
assert result == 12.2
assert (
str(lowerCamelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
UpperCAmelCase_ : int = linked_list.delete_nth(10 )
assert result is None
assert (
str(lowerCamelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(lowerCamelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(lowerCamelCase_ )
assert (
str(lowerCamelCase_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(lowerCamelCase_ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def _lowerCamelCase ( ):
"""simple docstring"""
from doctest import testmod
testmod()
UpperCAmelCase_ : Tuple = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(lowerCamelCase_ )
print('\nReading/changing Node data using indexing:' )
print(F'''Element at Position 1: {linked_list[1]}''' )
UpperCAmelCase_ : Dict = input('Enter New Value: ' ).strip()
print('New list:' )
print(lowerCamelCase_ )
print(F'''length of linked_list is : {len(lowerCamelCase_ )}''' )
if __name__ == "__main__":
    main()
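# Standalone sketch of the iterative pointer reversal used by reverse() above,
# on a bare node chain (the helper names here are illustrative, not the class API).
class _DemoNode:
    def __init__(self, data, nxt=None):
        self.data, self.next = data, nxt

def _reverse(head):
    prev = None
    while head:
        # re-point the current node backwards, then advance
        head.next, prev, head = prev, head, head.next
    return prev

assert _reverse(_DemoNode(1, _DemoNode(2, _DemoNode(3)))).data == 3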
| 715 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Any = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('xlm-roberta-base' )
UpperCAmelCase_ : Tuple = 'The dog is cute and lives in the garden house'
UpperCAmelCase_ : Dict = jnp.array([tokenizer.encode(snake_case_ )] )
UpperCAmelCase_ : str = (1, 1_2, 7_6_8) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ : List[str] = jnp.array(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
UpperCAmelCase_ : str = model(snake_case_ )['last_hidden_state']
self.assertEqual(output.shape , snake_case_ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , snake_case_ , atol=1E-3 ) )
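# Tiny illustration of the atol tolerance used above (toy values, not model
# outputs; assumes flax is installed, per the guard at the top of this file):
# jnp.allclose passes when every elementwise difference stays within roughly atol.
_toy = jnp.array([0.01_01, 0.12_18, -0.08_03])
assert jnp.allclose(_toy, _toy + 5E-4, atol=1E-3)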
| 389 | 0 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class __a ( _snake_case ):
__UpperCamelCase : Optional[int] = 'autoformer'
__UpperCamelCase : int = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : Optional[int] ,lowerCamelCase : Optional[int] = None ,lowerCamelCase : Optional[int] = None ,lowerCamelCase : str = "student_t" ,lowerCamelCase : str = "nll" ,lowerCamelCase : int = 1 ,lowerCamelCase : List[int] = [1, 2, 3, 4, 5, 6, 7] ,lowerCamelCase : bool = True ,lowerCamelCase : int = 0 ,lowerCamelCase : int = 0 ,lowerCamelCase : int = 0 ,lowerCamelCase : int = 0 ,lowerCamelCase : Optional[List[int]] = None ,lowerCamelCase : Optional[List[int]] = None ,lowerCamelCase : int = 64 ,lowerCamelCase : int = 2 ,lowerCamelCase : int = 2 ,lowerCamelCase : int = 2 ,lowerCamelCase : int = 2 ,lowerCamelCase : int = 32 ,lowerCamelCase : int = 32 ,lowerCamelCase : str = "gelu" ,lowerCamelCase : float = 0.1 ,lowerCamelCase : float = 0.1 ,lowerCamelCase : float = 0.1 ,lowerCamelCase : float = 0.1 ,lowerCamelCase : float = 0.1 ,lowerCamelCase : int = 100 ,lowerCamelCase : float = 0.02 ,lowerCamelCase : bool = True ,lowerCamelCase : str=True ,lowerCamelCase : int = 10 ,lowerCamelCase : int = 25 ,lowerCamelCase : int = 3 ,**lowerCamelCase : Dict ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = prediction_length
__SCREAMING_SNAKE_CASE = context_length if context_length is not None else prediction_length
__SCREAMING_SNAKE_CASE = distribution_output
__SCREAMING_SNAKE_CASE = loss
__SCREAMING_SNAKE_CASE = input_size
__SCREAMING_SNAKE_CASE = num_time_features
__SCREAMING_SNAKE_CASE = lags_sequence
__SCREAMING_SNAKE_CASE = scaling
__SCREAMING_SNAKE_CASE = num_dynamic_real_features
__SCREAMING_SNAKE_CASE = num_static_real_features
__SCREAMING_SNAKE_CASE = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowerCamelCase ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
__SCREAMING_SNAKE_CASE = cardinality
else:
__SCREAMING_SNAKE_CASE = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowerCamelCase ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
__SCREAMING_SNAKE_CASE = embedding_dimension
else:
__SCREAMING_SNAKE_CASE = [min(50 ,(cat + 1) // 2 ) for cat in self.cardinality]
__SCREAMING_SNAKE_CASE = num_parallel_samples
# Transformer architecture configuration
__SCREAMING_SNAKE_CASE = input_size * len(self.lags_sequence ) + self._number_of_features
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = encoder_attention_heads
__SCREAMING_SNAKE_CASE = decoder_attention_heads
__SCREAMING_SNAKE_CASE = encoder_ffn_dim
__SCREAMING_SNAKE_CASE = decoder_ffn_dim
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = decoder_layers
__SCREAMING_SNAKE_CASE = dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = encoder_layerdrop
__SCREAMING_SNAKE_CASE = decoder_layerdrop
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = init_std
__SCREAMING_SNAKE_CASE = use_cache
# Autoformer
__SCREAMING_SNAKE_CASE = label_length
__SCREAMING_SNAKE_CASE = moving_average
__SCREAMING_SNAKE_CASE = autocorrelation_factor
super().__init__(is_encoder_decoder=lowerCamelCase ,**lowerCamelCase )
@property
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
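# Worked example using the signature defaults above: with no categorical, real,
# or time features, _number_of_features reduces to the two scale features
# (log1p(abs(loc)) and log(scale)), so the transformer input feature size is
# input_size * len(lags_sequence) + 2 = 1 * 7 + 2 = 9. Values computed by hand.
_input_size, _lags = 1, [1, 2, 3, 4, 5, 6, 7]
_num_features = sum([min(50, (0 + 1) // 2)]) + _input_size * 2
assert _input_size * len(_lags) + _num_features == 9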
| 109 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
a_ ="""pegasus"""
a_ =["""past_key_values"""]
a_ ={"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Any , _a : int=5_0265 , _a : Tuple=1024 , _a : Optional[Any]=12 , _a : int=4096 , _a : str=16 , _a : Union[str, Any]=12 , _a : Optional[Any]=4096 , _a : Optional[int]=16 , _a : Union[str, Any]=0.0 , _a : str=0.0 , _a : List[Any]=True , _a : Optional[int]=True , _a : Tuple="gelu" , _a : Dict=1024 , _a : int=0.1 , _a : Any=0.0 , _a : Union[str, Any]=0.0 , _a : Any=0.02 , _a : Optional[Any]=0 , _a : str=False , _a : Tuple=0 , _a : Optional[int]=1 , _a : Union[str, Any]=1 , **_a : Optional[int] , ) -> Any:
__lowerCamelCase : Union[str, Any] = vocab_size
__lowerCamelCase : int = max_position_embeddings
__lowerCamelCase : Union[str, Any] = d_model
__lowerCamelCase : Union[str, Any] = encoder_ffn_dim
__lowerCamelCase : Union[str, Any] = encoder_layers
__lowerCamelCase : List[Any] = encoder_attention_heads
__lowerCamelCase : Optional[int] = decoder_ffn_dim
__lowerCamelCase : int = decoder_layers
__lowerCamelCase : Tuple = decoder_attention_heads
__lowerCamelCase : Optional[Any] = dropout
__lowerCamelCase : Tuple = attention_dropout
__lowerCamelCase : int = activation_dropout
__lowerCamelCase : Dict = activation_function
__lowerCamelCase : List[Any] = init_std
__lowerCamelCase : List[str] = encoder_layerdrop
__lowerCamelCase : Union[str, Any] = decoder_layerdrop
__lowerCamelCase : Union[str, Any] = use_cache
__lowerCamelCase : Tuple = encoder_layers
__lowerCamelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_a , eos_token_id=_a , is_encoder_decoder=_a , decoder_start_token_id=_a , forced_eos_token_id=_a , **_a , )
@property
def _lowercase ( self : Optional[int] ) -> int:
return self.encoder_attention_heads
@property
def _lowercase ( self : Any ) -> int:
return self.d_model
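# Usage sketch (assumes this file is the PegasusConfig shipped with transformers):
# the attribute_map and properties above make generic attribute names resolve to
# the Pegasus-specific ones.
from transformers import PegasusConfig

_cfg = PegasusConfig(encoder_layers=6, decoder_layers=6)
assert _cfg.num_attention_heads == _cfg.encoder_attention_heads
assert _cfg.hidden_size == _cfg.d_model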
| 459 | 0 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def UpperCamelCase_ ( a_ , a_ , a_ , a_ , a_ ) ->str:
# load base model
A =StableDiffusionPipeline.from_pretrained(a_ , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
A =load_file(a_ )
A =[]
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
A =key.split("." )[0].split(LORA_PREFIX_TEXT_ENCODER + "_" )[-1].split("_" )
A =pipeline.text_encoder
else:
A =key.split("." )[0].split(LORA_PREFIX_UNET + "_" )[-1].split("_" )
A =pipeline.unet
# find the target layer
A =layer_infos.pop(0 )
while len(a_ ) > -1:
try:
A =curr_layer.__getattr__(a_ )
if len(a_ ) > 0:
A =layer_infos.pop(0 )
elif len(a_ ) == 0:
break
except Exception:
if len(a_ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
A =layer_infos.pop(0 )
A =[]
if "lora_down" in key:
pair_keys.append(key.replace("lora_down" , "lora_up" ) )
pair_keys.append(a_ )
else:
pair_keys.append(a_ )
pair_keys.append(key.replace("lora_up" , "lora_down" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
A =state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
A =state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(a_ , a_ ).unsqueeze(2 ).unsqueeze(3 )
else:
A =state_dict[pair_keys[0]].to(torch.floataa )
A =state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(a_ , a_ )
# update visited list
for item in pair_keys:
visited.append(a_ )
return pipeline
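# Toy sketch of the merge math above: a LoRA pair factorizes a weight update as
# up @ down, and merging adds alpha * (up @ down) onto the frozen weight
# (W = W0 + alpha * deltaW). The rank and feature sizes below are assumptions.
_rank, _features = 2, 4
_W = torch.zeros(_features, _features)
_down = torch.randn(_rank, _features)  # lora_down: (rank, in_features)
_up = torch.randn(_features, _rank)    # lora_up:   (out_features, rank)
_W += 0.75 * torch.mm(_up, _down)
assert _W.shape == (_features, _features)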
if __name__ == "__main__":
    __a = argparse.ArgumentParser()
    parser.add_argument(
        """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
    )
    parser.add_argument(
        """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
    )
    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    parser.add_argument(
        """--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
    )
    parser.add_argument(
        """--lora_prefix_text_encoder""",
        default="""lora_te""",
        type=str,
        help="""The prefix of text encoder weight in safetensors""",
    )
    parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
    parser.add_argument(
        """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
    )
    parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    __a = parser.parse_args()
    __a = args.base_model_path
    __a = args.checkpoint_path
    __a = args.dump_path
    __a = args.lora_prefix_unet
    __a = args.lora_prefix_text_encoder
    __a = args.alpha
    __a = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    __a = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 689 |
import os
import sys
import unittest
__a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__a = os.path.join(git_repo_path, """src""", """diffusers""")
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
A =find_backend(" if not is_torch_available():" )
self.assertEqual(snake_case__ , "torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
A =find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(snake_case__ , "torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
A =find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(snake_case__ , "torch_and_transformers_and_onnx" )
def _a ( self : List[Any] ):
"""simple docstring"""
A =read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" , snake_case__ )
self.assertIn("torch_and_transformers" , snake_case__ )
self.assertIn("flax_and_transformers" , snake_case__ )
self.assertIn("torch_and_transformers_and_onnx" , snake_case__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" , objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )
def _a ( self : Dict ):
"""simple docstring"""
A =create_dummy_object("CONSTANT" , "'torch'" )
self.assertEqual(snake_case__ , "\nCONSTANT = None\n" )
A =create_dummy_object("function" , "'torch'" )
self.assertEqual(
snake_case__ , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
A ="\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
A =create_dummy_object("FakeClass" , "'torch'" )
self.assertEqual(snake_case__ , snake_case__ )
def _a ( self : Tuple ):
"""simple docstring"""
A ="# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
A =create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] , snake_case__ )
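# Minimal re-implementation sketch of what find_backend is expected to do (for
# illustration only -- the real parser lives in utils/check_dummies.py):
import re

def _find_backend_demo(line):
    backends = re.findall(r"is_(\w+)_available\(\)", line)
    return "_and_".join(backends) if backends else None

assert _find_backend_demo(" if not is_torch_available():") == "torch"
assert (
    _find_backend_demo(" if not (is_torch_available() and is_transformers_available()):")
    == "torch_and_transformers"
)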
| 689 | 1 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCAmelCase = get_logger(__name__)
class A_ :
'''simple docstring'''
_UpperCamelCase : Dict = """dummy_data"""
_UpperCamelCase : Optional[int] = """datasets"""
_UpperCamelCase : Tuple = False
def __init__( self , snake_case , snake_case , snake_case , snake_case = None , snake_case = False , snake_case = True , snake_case = None , ):
lowercase = 0
lowercase = dataset_name
lowercase = cache_dir
lowercase = use_local_dummy_data
lowercase = config
# download_callbacks take a single url as input
lowercase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowercase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowercase = str(snake_case )
# to be downloaded
lowercase = None
lowercase = None
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self._dummy_file is None:
lowercase = self.download_dummy_data()
return self._dummy_file
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase = cached_path(
snake_case , cache_dir=self.cache_dir , extract_compressed_file=snake_case , force_extract=snake_case )
return os.path.join(snake_case , self.dummy_file_name )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
if self._bucket_url is None:
lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def SCREAMING_SNAKE_CASE__ ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowercase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowercase = self.dummy_file_name
# special case when data_url is a dict
        if isinstance(snake_case , dict ):
return self.create_dummy_data_dict(snake_case , snake_case )
elif isinstance(snake_case , (list, tuple) ):
return self.create_dummy_data_list(snake_case , snake_case )
else:
return self.create_dummy_data_single(snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case ):
return self.download_and_extract(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
return self.download_and_extract(snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , *snake_case , **snake_case ):
return path
def SCREAMING_SNAKE_CASE__ ( self ):
return {}
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
                if isinstance(single_urls , list ):
for single_url in single_urls:
                        download_callback(single_url )
else:
lowercase = single_urls
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls , list ):
                lowercase = [os.path.join(snake_case , urllib.parse.quote_plus(Path(x ).name ) ) for x in single_urls]
else:
lowercase = single_urls
lowercase = os.path.join(snake_case , urllib.parse.quote_plus(Path(snake_case ).name ) )
lowercase = value
# make sure that values are unique
        if all(isinstance(i , str ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowercase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
lowercase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , url ) ) for url in data_url )
lowercase = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowercase = [data_url[0]] * len(snake_case )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase = os.path.join(snake_case , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(snake_case )
return dummy_data_list
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
for download_callback in self.download_callbacks:
download_callback(snake_case )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase = os.path.join(snake_case , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(snake_case ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
def _iter_archive_members(snake_case ):
# this preserves the order of the members inside the ZIP archive
lowercase = Path(self.dummy_file ).parent
lowercase = path.relative_to(snake_case )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(snake_case )
lowercase = Path(snake_case )
lowercase = _iter_archive_members(snake_case ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(snake_case ).as_posix(), file_path.open('rb' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
        if not isinstance(snake_case , list ):
lowercase = [paths]
for path in paths:
if os.path.isfile(snake_case ):
if os.path.basename(snake_case ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(snake_case ):
if os.path.basename(snake_case ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(snake_case ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(snake_case , snake_case )
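# Sketch of the URL-to-local-name mapping used throughout the manager above: the
# last path component of each URL is percent-encoded with quote_plus so query
# strings stay filesystem-safe. The URL below is a made-up example.
_url = "https://example.com/data/train.json?raw=true"
_local = os.path.join("dummy_data", urllib.parse.quote_plus(Path(_url).name))
assert _local == os.path.join("dummy_data", "train.json%3Fraw%3Dtrue")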
| 84 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def lowerCamelCase__ ( UpperCAmelCase_ )-> Tuple:
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> List[str]:
"""simple docstring"""
return max(metric_fn(UpperCAmelCase_ , UpperCAmelCase_ ) for gt in ground_truths )
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> List[Any]:
"""simple docstring"""
UpperCamelCase = [line.strip() for line in open(UpperCAmelCase_ , "r" ).readlines()]
UpperCamelCase = []
if args.gold_data_mode == "qa":
UpperCamelCase = pd.read_csv(UpperCAmelCase_ , sep="\t" , header=UpperCAmelCase_ )
for answer_list in data[1]:
            UpperCamelCase = ast.literal_eval(answer_list )
answers.append(UpperCAmelCase_ )
else:
UpperCamelCase = [line.strip() for line in open(UpperCAmelCase_ , "r" ).readlines()]
UpperCamelCase = [[reference] for reference in references]
UpperCamelCase = UpperCamelCase = UpperCamelCase = 0
for prediction, ground_truths in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        fa += metric_max_over_ground_truths(fa_score , prediction , ground_truths )
UpperCamelCase = 100.0 * em / total
UpperCamelCase = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> List[Any]:
"""simple docstring"""
UpperCamelCase = args.k
UpperCamelCase = [line.strip() for line in open(UpperCAmelCase_ , "r" ).readlines()]
UpperCamelCase = [line.strip() for line in open(UpperCAmelCase_ , "r" ).readlines()]
UpperCamelCase = UpperCamelCase = 0
for hypo, reference in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCamelCase = set(hypo.split("\t" )[:k] )
UpperCamelCase = set(reference.split("\t" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
UpperCamelCase = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> Optional[int]:
"""simple docstring"""
    def strip_title(title ):
if title.startswith("\"" ):
UpperCamelCase = title[1:]
if title.endswith("\"" ):
UpperCamelCase = title[:-1]
return title
UpperCamelCase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
UpperCAmelCase_ , return_tensors="pt" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , )["input_ids"].to(args.device )
UpperCamelCase = rag_model.rag.question_encoder(UpperCAmelCase_ )
UpperCamelCase = question_enc_outputs[0]
UpperCamelCase = rag_model.retriever(
UpperCAmelCase_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
UpperCamelCase = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
UpperCamelCase = []
for docs in all_docs:
        UpperCamelCase = [strip_title(title ) for title in docs["title"]]
provenance_strings.append("\t".join(UpperCAmelCase_ ) )
return provenance_strings
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> Optional[int]:
"""simple docstring"""
with torch.no_grad():
UpperCamelCase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
UpperCAmelCase_ , return_tensors="pt" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ )
UpperCamelCase = inputs_dict.input_ids.to(args.device )
UpperCamelCase = inputs_dict.attention_mask.to(args.device )
UpperCamelCase = rag_model.generate( # rag_model overwrites generate
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=UpperCAmelCase_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
UpperCamelCase = rag_model.retriever.generator_tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
if args.print_predictions:
for q, a in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
logger.info("Q: {} - A: {}".format(UpperCAmelCase_ , UpperCAmelCase_ ) )
return answers
def lowerCamelCase__ ( )-> Any:
"""simple docstring"""
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
"--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=UpperCAmelCase_ , help=(
"RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
" model_name_or_path"
) , )
parser.add_argument(
"--index_name" , default=UpperCAmelCase_ , choices=["exact", "compressed", "legacy"] , type=UpperCAmelCase_ , help="RAG model retriever type" , )
parser.add_argument(
"--index_path" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="Path to the retrieval index" , )
parser.add_argument("--n_docs" , default=5 , type=UpperCAmelCase_ , help="Number of retrieved docs" )
parser.add_argument(
"--model_name_or_path" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
parser.add_argument(
"--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=UpperCAmelCase_ , help=(
"Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
" precision@k."
) , )
parser.add_argument("--k" , default=1 , type=UpperCAmelCase_ , help="k for the precision@k calculation" )
parser.add_argument(
"--evaluation_set" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to a file containing evaluation samples" , )
parser.add_argument(
"--gold_data_path" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to a tab-separated file with gold samples" , )
parser.add_argument(
"--gold_data_mode" , default="qa" , type=UpperCAmelCase_ , choices=["qa", "ans"] , help=(
"Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string"
) , )
parser.add_argument(
"--predictions_path" , type=UpperCAmelCase_ , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
parser.add_argument(
"--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
parser.add_argument(
"--eval_batch_size" , default=8 , type=UpperCAmelCase_ , help="Batch size per GPU/CPU for evaluation." , )
parser.add_argument(
"--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
parser.add_argument(
"--num_beams" , default=4 , type=UpperCAmelCase_ , help="Number of beams to be used when generating answers" , )
parser.add_argument("--min_length" , default=1 , type=UpperCAmelCase_ , help="Min length of the generated answers" )
parser.add_argument("--max_length" , default=50 , type=UpperCAmelCase_ , help="Max length of the generated answers" )
parser.add_argument(
"--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
parser.add_argument(
"--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
UpperCamelCase = parser.parse_args()
UpperCamelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
    main(args)
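# Illustrative invocation (flag names taken from the argparse setup above; the
# checkpoint and data paths below are placeholders, not from the original file):
#
#     python eval_rag.py \
#         --model_name_or_path facebook/rag-sequence-nq \
#         --eval_mode e2e --eval_batch_size 8 --num_beams 4 \
#         --evaluation_set path/to/questions.txt \
#         --predictions_path path/to/preds.txt \
#         --gold_data_path path/to/gold.txt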
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
"""simple docstring"""
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=64 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str:
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_input_mask
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = embedding_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = scope
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_input_mask:
__magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
def __A ( self , A , A , A , A , A , A , A ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = MegatronBertModel(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , token_type_ids=A )
__magic_name__ = model(A , token_type_ids=A )
__magic_name__ = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self , A , A , A , A , A , A , A ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = MegatronBertForMaskedLM(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , A , A , A ) -> Any:
'''simple docstring'''
__magic_name__ = MegatronBertForCausalLM(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , A , A , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
__magic_name__ = MegatronBertForNextSentencePrediction(config=A )
model.to(A )
model.eval()
__magic_name__ = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __A ( self , A , A , A , A , A , A , A ) -> Any:
'''simple docstring'''
__magic_name__ = MegatronBertForPreTraining(config=A )
model.to(A )
model.eval()
__magic_name__ = model(
A , attention_mask=A , token_type_ids=A , labels=A , next_sentence_label=A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __A ( self , A , A , A , A , A , A , A ) -> Any:
'''simple docstring'''
__magic_name__ = MegatronBertForQuestionAnswering(config=A )
model.to(A )
model.eval()
__magic_name__ = model(
A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , A , A , A , A , A , A , A ) -> Any:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = MegatronBertForSequenceClassification(A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , A , A , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = MegatronBertForTokenClassification(config=A )
model.to(A )
model.eval()
__magic_name__ = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , A , A , A , A , A , A , A ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.num_choices
__magic_name__ = MegatronBertForMultipleChoice(config=A )
model.to(A )
model.eval()
__magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self ) -> Tuple:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__magic_name__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
_a = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
_a = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_a = True
# test_resize_embeddings = False
_a = False
def __A ( self , A , A , A=False ) -> List[Any]:
'''simple docstring'''
__magic_name__ = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class in get_values(A ):
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A )
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ = MegatronBertModelTester(self )
__magic_name__ = ConfigTester(self , config_class=A , hidden_size=37 )
def __A ( self ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*A )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*A )
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*A )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*A )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*A )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*A )
def __A ( self ) -> int:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*A )
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
"""simple docstring"""
@slow
@unittest.skip('''Model is not available.''' )
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
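# Quick sanity check (illustrative, not part of the original module): both
# implementations should return 5.0 for the classic 3-4-5 right triangle.
#
#     >>> float(euclidean_distance((0, 0), (3, 4)))
#     5.0
#     >>> euclidean_distance_no_np((0, 0), (3, 4))
#     5.0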
if __name__ == "__main__":
    def benchmark() -> None:
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_0000 , globals=globals() , ) )
    benchmark()
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
    def __init__(self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
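# Usage sketch (illustrative): instantiating the config and round-tripping it
# through `to_dict`, which serializes the nested backbone config as well.
#
#     config = DetaConfig(num_queries=300, two_stage=True)
#     assert config.to_dict()["backbone_config"]["model_type"] == "resnet"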
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        unet_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(unet_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
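# Usage sketch (illustrative, mirroring the integration test above):
#
#     unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#     pndm = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
#     image = pndm(generator=torch.manual_seed(0), output_type="numpy").images[0]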
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
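# Expected behaviour (illustrative):
#
#     check_same_shape([torch.zeros(2, 3), torch.ones(2, 3)])   # True
#     check_same_shape([torch.zeros(2, 3), torch.zeros(3, 2)])  # False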
class StableDiffusionLatentUpscalePipelineFastTests(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
_A = StableDiffusionLatentUpscalePipeline
_A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
_A = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
_A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_A = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_A = frozenset([])
_A = True
@property
def _a (self ):
'''simple docstring'''
lowerCamelCase = 1
lowerCamelCase = 4
lowerCamelCase = (16, 16)
lowerCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
return image
def _a (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = UNetaDConditionModel(
act_fn="gelu" , attention_head_dim=8 , norm_num_groups=__a , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
) , in_channels=8 , mid_block_type=__a , only_cross_attention=__a , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , )
lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
lowerCamelCase = EulerDiscreteScheduler(prediction_type="sample" )
lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="quick_gelu" , projection_dim=5_12 , )
lowerCamelCase = CLIPTextModel(__a )
lowerCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def _a (self , __a , __a=0 ):
'''simple docstring'''
if str(__a ).startswith("mps" ):
lowerCamelCase = torch.manual_seed(__a )
else:
lowerCamelCase = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _a (self ):
'''simple docstring'''
lowerCamelCase = "cpu"
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = self.get_dummy_inputs(__a )
lowerCamelCase = pipe(**__a ).images
lowerCamelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
lowerCamelCase = np.array(
[0.4722_2412, 0.4192_1633, 0.4471_7434, 0.4687_4192, 0.4258_8258, 0.4615_0726, 0.467_7534, 0.4558_3832, 0.4857_9055] )
lowerCamelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1E-3 )
def _a (self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def _a (self ):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def _a (self ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def _a (self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def _a (self ):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def _a (self ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3E-3 )
def _a (self ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def _a (self ):
'''simple docstring'''
lowerCamelCase = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = self.pipeline_class(**__a )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = self.get_dummy_inputs(__a )
lowerCamelCase = 2
lowerCamelCase = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
lowerCamelCase = getattr(__a , scheduler_enum.name )
lowerCamelCase = scheduler_cls.from_config(pipe.scheduler.config )
lowerCamelCase = pipe(**__a )[0]
outputs.append(__a )
assert check_same_shape(__a )
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
def _a (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a (self ):
'''simple docstring'''
lowerCamelCase = torch.manual_seed(33 )
lowerCamelCase = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.floataa )
pipe.to("cuda" )
lowerCamelCase = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
lowerCamelCase = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
lowerCamelCase = pipe(__a , generator=__a , output_type="latent" ).images
lowerCamelCase = upscaler(
prompt=__a , image=__a , num_inference_steps=20 , guidance_scale=0 , generator=__a , output_type="np" , ).images[0]
lowerCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def _a (self ):
'''simple docstring'''
lowerCamelCase = torch.manual_seed(33 )
lowerCamelCase = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
lowerCamelCase = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
lowerCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
lowerCamelCase = upscaler(
prompt=__a , image=__a , num_inference_steps=20 , guidance_scale=0 , generator=__a , output_type="np" , ).images[0]
lowerCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
        assert np.abs((expected_image - image).max() ) < 5E-2
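# End-to-end usage sketch (illustrative, mirroring the slow tests above): the
# base pipeline emits latents, which the upscaler consumes directly.
#
#     pipe = StableDiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16).to("cuda")
#     upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
#         "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16).to("cuda")
#     latents = pipe(prompt, output_type="latent").images
#     image = upscaler(prompt=prompt, image=latents, num_inference_steps=20,
#                      guidance_scale=0).images[0]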
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
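# Usage sketch (illustrative): the processor wraps a tokenizer and an image
# processor in a single call and merges their outputs.
#
#     from PIL import Image
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     inputs = processor(text=["a photo of a cat"],
#                        images=Image.new("RGB", (224, 224)),
#                        return_tensors="pt")
#     # -> keys: input_ids, attention_mask, pixel_values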
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
"""simple docstring"""
@property
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.get_dummy_input()
@property
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def UpperCAmelCase ( self , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False , ) -> Dict:
'''simple docstring'''
__snake_case : Optional[int] = 4
__snake_case : int = 32
__snake_case : str = (32, 32)
__snake_case : str = torch.manual_seed(0 )
__snake_case : Optional[Any] = torch.device(UpperCAmelCase )
__snake_case : Optional[int] = (batch_size, num_channels) + sizes
__snake_case : List[Any] = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase )
__snake_case : Optional[int] = {"hidden_states": hidden_states}
if include_temb:
__snake_case : Union[str, Any] = 128
__snake_case : Union[str, Any] = randn_tensor((batch_size, temb_channels) , generator=UpperCAmelCase , device=UpperCAmelCase )
if include_res_hidden_states_tuple:
__snake_case : Any = torch.manual_seed(1 )
__snake_case : int = (randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ),)
if include_encoder_hidden_states:
__snake_case : Dict = floats_tensor((batch_size, 32, 32) ).to(UpperCAmelCase )
if include_skip_sample:
__snake_case : str = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCAmelCase , device=UpperCAmelCase )
return dummy_input
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case : str = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 128,
}
if self.block_type == "up":
__snake_case : str = 32
if self.block_type == "mid":
init_dict.pop("out_channels" )
__snake_case : List[str] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
__snake_case , __snake_case : Tuple = self.prepare_init_args_and_inputs_for_common()
__snake_case : str = self.block_class(**UpperCAmelCase )
unet_block.to(UpperCAmelCase )
unet_block.eval()
with torch.no_grad():
__snake_case : Union[str, Any] = unet_block(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
__snake_case : str = output[0]
self.assertEqual(output.shape , self.output_shape )
__snake_case : Tuple = output[0, -1, -3:, -3:]
__snake_case : Dict = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase )
assert torch_all_close(output_slice.flatten() , UpperCAmelCase , atol=5E-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case , __snake_case : Dict = self.prepare_init_args_and_inputs_for_common()
__snake_case : Any = self.block_class(**UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
__snake_case : str = model(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
__snake_case : Dict = output[0]
__snake_case : Dict = torch.device(UpperCAmelCase )
__snake_case : str = randn_tensor(output.shape , device=UpperCAmelCase )
__snake_case : Optional[Any] = torch.nn.functional.mse_loss(UpperCAmelCase , UpperCAmelCase )
loss.backward()
def knapsack(values: list, weights: list, number_of_items: int, max_weight: int, index: int) -> int:
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(values, weights, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            values, weights, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
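    # Illustrative run (added for clarity, not in the original file): with a
    # weight budget of 5 the optimum is 6, e.g. taking the items weighing 2 and 3.
    values, weights = [1, 2, 4, 5], [1, 2, 3, 4]
    print(knapsack(values, weights, len(values), 5, 0))  # 6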
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
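# Example (illustrative): a short cosine schedule; every beta is clipped at
# `max_beta`.
#
#     betas = betas_for_alpha_bar(10)
#     assert betas.shape == (10,) and float(betas.max()) <= 0.999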
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
def __init__( self : Optional[int] , _lowerCAmelCase : int = 1_0_0_0 , _lowerCAmelCase : float = 0.0_0085 , _lowerCAmelCase : float = 0.012 , _lowerCAmelCase : str = "linear" , _lowerCAmelCase : Optional[Union[np.ndarray, List[float]]] = None , _lowerCAmelCase : str = "epsilon" , _lowerCAmelCase : str = "linspace" , _lowerCAmelCase : int = 0 , ):
'''simple docstring'''
if trained_betas is not None:
__lowercase =torch.tensor(_lowerCAmelCase , dtype=torch.floataa)
elif beta_schedule == "linear":
__lowercase =torch.linspace(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=torch.floataa)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowercase =(
torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCAmelCase , dtype=torch.floataa) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowercase =betas_for_alpha_bar(_lowerCAmelCase)
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""")
__lowercase =1.0 - self.betas
__lowercase =torch.cumprod(self.alphas , dim=0)
# set all values
self.set_timesteps(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int=None):
'''simple docstring'''
if schedule_timesteps is None:
__lowercase =self.timesteps
__lowercase =(schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter) == 0:
__lowercase =1 if len(_lowerCAmelCase) > 1 else 0
else:
__lowercase =timestep.cpu().item() if torch.is_tensor(_lowerCAmelCase) else timestep
__lowercase =self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowerCamelCase ( self : int , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : Union[float, torch.FloatTensor] , ):
'''simple docstring'''
__lowercase =self.index_for_timestep(_lowerCAmelCase)
if self.state_in_first_order:
__lowercase =self.sigmas[step_index]
else:
__lowercase =self.sigmas_interpol[step_index]
__lowercase =sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowerCamelCase ( self : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, torch.device] = None , _lowerCAmelCase : Optional[int] = None , ):
'''simple docstring'''
__lowercase =num_inference_steps
__lowercase =num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__lowercase =np.linspace(0 , num_train_timesteps - 1 , _lowerCAmelCase , dtype=_lowerCAmelCase)[::-1].copy()
elif self.config.timestep_spacing == "leading":
__lowercase =num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowercase =(np.arange(0 , _lowerCAmelCase) * step_ratio).round()[::-1].copy().astype(_lowerCAmelCase)
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__lowercase =num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowercase =(np.arange(_lowerCAmelCase , 0 , -step_ratio)).round().copy().astype(_lowerCAmelCase)
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""")
__lowercase =np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
__lowercase =torch.from_numpy(np.log(_lowerCAmelCase)).to(_lowerCAmelCase)
__lowercase =np.interp(_lowerCAmelCase , np.arange(0 , len(_lowerCAmelCase)) , _lowerCAmelCase)
__lowercase =np.concatenate([sigmas, [0.0]]).astype(np.floataa)
__lowercase =torch.from_numpy(_lowerCAmelCase).to(device=_lowerCAmelCase)
# interpolate sigmas
__lowercase =sigmas.log().lerp(sigmas.roll(1).log() , 0.5).exp()
__lowercase =torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
__lowercase =torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])
if str(_lowerCAmelCase).startswith('mps'):
# mps does not support float64
__lowercase =torch.from_numpy(_lowerCAmelCase).to(_lowerCAmelCase , dtype=torch.floataa)
else:
__lowercase =torch.from_numpy(_lowerCAmelCase).to(_lowerCAmelCase)
# interpolate timesteps
__lowercase =self.sigma_to_t(_lowerCAmelCase).to(_lowerCAmelCase , dtype=timesteps.dtype)
__lowercase =torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1).flatten()
__lowercase =torch.cat([timesteps[:1], interleaved_timesteps])
__lowercase =None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__lowercase =defaultdict(_lowerCAmelCase)
def __lowerCamelCase ( self : Dict , _lowerCAmelCase : Optional[int]):
'''simple docstring'''
__lowercase =sigma.log()
# get distribution
__lowercase =log_sigma - self.log_sigmas[:, None]
# get sigmas range
__lowercase =dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
__lowercase =low_idx + 1
__lowercase =self.log_sigmas[low_idx]
__lowercase =self.log_sigmas[high_idx]
# interpolate sigmas
__lowercase =(low - log_sigma) / (low - high)
__lowercase =w.clamp(0 , 1)
# transform interpolation to time range
__lowercase =(1 - w) * low_idx + w * high_idx
__lowercase =t.view(sigma.shape)
return t
@property
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
return self.sample is None
def __lowerCamelCase ( self : Optional[Any] , _lowerCAmelCase : Union[torch.FloatTensor, np.ndarray] , _lowerCAmelCase : Union[float, torch.FloatTensor] , _lowerCAmelCase : Union[torch.FloatTensor, np.ndarray] , _lowerCAmelCase : bool = True , ):
'''simple docstring'''
__lowercase =self.index_for_timestep(_lowerCAmelCase)
# advance index counter by 1
__lowercase =timestep.cpu().item() if torch.is_tensor(_lowerCAmelCase) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__lowercase =self.sigmas[step_index]
__lowercase =self.sigmas_interpol[step_index + 1]
__lowercase =self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__lowercase =self.sigmas[step_index - 1]
__lowercase =self.sigmas_interpol[step_index]
__lowercase =self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__lowercase =0
__lowercase =sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__lowercase =sigma_hat if self.state_in_first_order else sigma_interpol
__lowercase =sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__lowercase =sigma_hat if self.state_in_first_order else sigma_interpol
__lowercase =model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('prediction_type not implemented yet: sample')
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""")
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__lowercase =(sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__lowercase =sigma_interpol - sigma_hat
# store for 2nd order step
__lowercase =sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__lowercase =(sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__lowercase =sigma_next - sigma_hat
__lowercase =self.sample
__lowercase =None
__lowercase =sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_lowerCAmelCase)
def __lowerCamelCase ( self : Union[str, Any] , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : torch.FloatTensor , _lowerCAmelCase : torch.FloatTensor , ):
'''simple docstring'''
__lowercase =self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype)
if original_samples.device.type == "mps" and torch.is_floating_point(_lowerCAmelCase):
# mps does not support float64
__lowercase =self.timesteps.to(original_samples.device , dtype=torch.floataa)
__lowercase =timesteps.to(original_samples.device , dtype=torch.floataa)
else:
__lowercase =self.timesteps.to(original_samples.device)
__lowercase =timesteps.to(original_samples.device)
__lowercase =[self.index_for_timestep(_lowerCAmelCase , _lowerCAmelCase) for t in timesteps]
__lowercase =sigmas[step_indices].flatten()
while len(sigma.shape) < len(original_samples.shape):
__lowercase =sigma.unsqueeze(-1)
__lowercase =original_samples + noise * sigma
return noisy_samples
def __len__( self : Tuple):
'''simple docstring'''
return self.config.num_train_timesteps
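# Usage sketch (illustrative): `set_timesteps(n)` interleaves interpolated
# sigmas with the regular ones, so the schedule is roughly twice as long
# (2 * n - 1 entries) as the requested number of inference steps.
#
#     scheduler = KDPM2DiscreteScheduler(beta_schedule="scaled_linear")
#     scheduler.set_timesteps(25, device="cpu")
#     assert len(scheduler.timesteps) == 2 * 25 - 1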
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir')
    if "full:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('---\ndataset_info:\n  dataset_size: 42\n---')
    if "empty:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('')
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / 'dataset_infos.json', 'w') as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(dataset_info: DatasetInfo, tmp_path):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, 'dataset_info.json'))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description='foo', citation='bar', homepage='https://foo.bar', license='CC0', features=Features({'a': Value('int32')}), post_processed={}, supervised_keys=(), task_templates=[], builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train', 'num_examples': 42}], download_checksums={}, download_size=1_337, post_processing_size=442, dataset_size=1_234, size_in_bytes=1_337 + 442 + 1_234, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1_337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict: DatasetInfosDict, tmp_path):
    dataset_infos_dir = str(tmp_path)
    dataset_infos_dict.write_to_directory(dataset_infos_dir)
    reloaded = DatasetInfosDict.from_directory(dataset_infos_dir)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(dataset_infos_dir, 'README.md'))
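# Round-trip sketch (illustrative) of what these tests exercise:
#
#     info = DatasetInfo(description="foo", dataset_size=42)
#     reloaded = DatasetInfo._from_yaml_dict(info._to_yaml_dict())
#     assert reloaded.dataset_size == 42   # description is not kept in the YAML dict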
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"feature request",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
if __name__ == "__main__":
main()
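# Illustrative local run (assumes a GitHub token with `repo` scope):
#
#     GITHUB_TOKEN=<your-token> python stale.py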
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers ( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ):
    """Replace every eligible ``nn.Linear`` in ``model`` with its bitsandbytes quantized counterpart."""
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model , has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _replace_with_bnb_layers ( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ):
    """Private recursive helper: replace ``nn.Linear`` children in-place and report whether anything was replaced."""
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = """.""".join(current_key_name )
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace the ``nn.Linear`` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
        if len(list(module.children() ) ) > 0:
            _ , _has_been_replaced = _replace_with_bnb_layers(
                module , bnb_quantization_config , modules_to_not_convert , current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def get_keys_to_not_convert ( model ):
    """Return the module names (tied weights, output head) that must not be converted to int8/int4."""
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model , """base_model_prefix""" ):
        is_base_model = not hasattr(model , model.base_model_prefix )

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )

    # remove ".weight" from the keys
    names_to_remove = [""".weight""", """.bias"""]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , """""" )
        filtered_module_names.append(name )

    return filtered_module_names
def has_4bit_bnb_layers ( model ):
    """Return True if the model contains at least one bitsandbytes 4-bit linear layer."""
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False
def get_parameter_device ( parameter ):
    """Return the device of the first parameter of the given module."""
    return next(parameter.parameters() ).device
def quantize_and_offload_8bit ( model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics ):
    """Quantize ``param`` if needed and offload the resulting weight (and its SCB statistics) to disk."""
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(""".""" )
            # walk the dotted parameter name down to the submodule that actually owns the tensor
            for split in splits[:-1]:
                new_module = getattr(module , split )
                if new_module is None:
                    raise ValueError(F"{module} has no attribute {split}." )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
        if hasattr(module._parameters[tensor_name] , """SCB""" ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fp16_statistics , param_name.replace("""weight""" , """SCB""" ) , offload_folder , index=offload_index )

    set_module_tensor_to_device(model , param_name , """meta""" , dtype=new_dtype , value=torch.empty(*param.size() ) )
| 591 | 1 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset ( datasets.BeamBasedBuilder ):
    """A minimal Beam-based builder with a single string feature, used for testing."""

    def _info (self ):
        return datasets.DatasetInfo(
            features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=None , )

    def _split_generators (self , dl_manager , pipeline ):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]

    def _build_pcollection (self , pipeline , examples ):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples )


class NestedBeamDataset ( datasets.BeamBasedBuilder ):
    """A Beam-based builder with a nested sequence feature, used for testing."""

    def _info (self ):
        return datasets.DatasetInfo(
            features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=None , )

    def _split_generators (self , dl_manager , pipeline ):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
        ]

    def _build_pcollection (self , pipeline , examples ):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples )
def get_test_dummy_examples( ):
    return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''])]


def get_test_nested_examples( ):
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''])]
class BeamBuilderTest ( TestCase ):
    """End-to-end tests for Beam-based dataset builders."""

    @require_beam
    def test_download_and_prepare (self ):
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner='''DirectRunner''' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , expected_num_examples )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , expected_num_examples )
            self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
            del dset

    @require_beam
    def test_download_and_prepare_sharded (self ):
        import apache_beam as beam

        original_write_to_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner='''DirectRunner''' )
            # force the Beam pipeline to write the train split in two Parquet shards
            with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_to_parquet , num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , F"""{builder.name}-train-00001-of-00002.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , expected_num_examples )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , expected_num_examples )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
            del dset

    @require_beam
    def test_no_beam_options (self ):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )

    @require_beam
    def test_nested_features (self ):
        expected_num_examples = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir , beam_runner='''DirectRunner''' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} )} ) if False else None or self.assertDictEqual(
                builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , expected_num_examples )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , expected_num_examples )
            self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
            del dset
| 711 |
'''simple docstring'''
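# Project Euler problem 9: there is exactly one Pythagorean triplet (a, b, c)
# with a + b + c = 1000; solution() returns the product a * b * c.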
def solution( ):
return [
a * b * (1_0_0_0 - a - b)
for a in range(1, 9_9_9)
for b in range(lowerCamelCase, 9_9_9)
if (a * a + b * b == (1_0_0_0 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 474 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
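# Lazy import structure: heavy submodules are only imported the first time they are accessed.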
__lowerCamelCase : Optional[int] = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 385 |
import numpy as np
def sigmoid ( vector : np.ndarray ) -> np.ndarray:
    '''
    Apply the logistic sigmoid function 1 / (1 + e^(-x)) elementwise.

    >>> sigmoid(np.array([0.0]))
    array([0.5])
    >>> sigmoid(np.array([-1.0, 1.0])).round(3)
    array([0.269, 0.731])
    '''
    return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 385 | 1 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_snake_case = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
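# Example invocation (hypothetical local paths, shown for illustration only):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_cased/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet_pytorch --finetuning_task sts-b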
def convert_xlnet_checkpoint_to_pytorch( tf_checkpoint_path , xlnet_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file )

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(f'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}''' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f'''Save configuration file to {os.path.abspath(pytorch_config_dump_path )}''' )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 713 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_snake_case = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
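# helper: build a random integer tensor of the given shape, with token ids drawn uniformly from the vocabulary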
def ids_tensor ( shape , vocab_size , rng=None ):
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims ):
        values.append(rng.randint(0 , vocab_size - 1 ) )

    output = np.array(values , dtype=jnp.int32 ).reshape(shape )

    return output
def random_attention_mask ( shape , rng=None ):
    attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config ( self ):
'''simple docstring'''
        config , inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids )
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
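        # generated token sequences must match between the Flax model and its PyTorch port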
        config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config )

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers , pt_model_class_name )
            pt_model = pt_model_class(config ).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model , flax_model.params )

            flax_generation_outputs = flax_model.generate(input_ids ).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids , dtype=torch.long ) )

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = False
lowercase__ = max_length
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = True
lowercase__ = max_length
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = False
lowercase__ = max_length
lowercase__ = 2
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = False
lowercase__ = max_length
lowercase__ = 2
lowercase__ = 2
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def UpperCAmelCase ( self :int ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = True
lowercase__ = max_length
lowercase__ = 0.8
lowercase__ = 10
lowercase__ = 0.3
lowercase__ = 1
lowercase__ = 8
lowercase__ = 9
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = max_length
lowercase__ = 1
lowercase__ = 8
lowercase__ = 9
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = max_length
lowercase__ = 2
lowercase__ = 1
lowercase__ = 8
lowercase__ = 9
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowercase__ = attention_mask.at[(0, 0)].set(0 )
lowercase__ = False
lowercase__ = max_length
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowercase__ = attention_mask.at[(0, 0)].set(0 )
lowercase__ = True
lowercase__ = max_length
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowercase__ = attention_mask.at[(0, 0)].set(0 )
lowercase__ = 2
lowercase__ = max_length
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class FlaxGenerationIntegrationTests ( unittest.TestCase ):
    def test_validate_generation_inputs ( self ):
        '''Unknown or misspelled kwargs passed to `generate` must raise a ValueError.'''
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )

        input_string = "Hello world"
        input_ids = tokenizer(input_string , return_tensors="np" ).input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError , "do_samples" ):
            model.generate(input_ids , do_samples=True )

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError , "foo" ):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids , **fake_model_kwargs )
| 611 | 0 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert ( base_model_path: str ,checkpoint_path: str ,lora_prefix_unet: str ,lora_prefix_text_encoder: str ,alpha: float ) -> StableDiffusionPipeline:
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path ,torch_dtype=torch.float32 )

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path )

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split('.' )[0].split(lora_prefix_text_encoder + '_' )[-1].split('_' )
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split('.' )[0].split(lora_prefix_unet + '_' )[-1].split('_' )
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name = layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(temp_name ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name = layer_infos.pop(0 )

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace('lora_down' ,'lora_up' ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace('lora_up' ,'lora_down' ) )

        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up ,weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up ,weight_down )

        # update visited list
        for item in pair_keys:
            visited.append(item )

    return pipeline
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
)
parser.add_argument(
"--lora_prefix_text_encoder",
default="lora_te",
type=str,
help="The prefix of text encoder weight in safetensors",
)
parser.add_argument("--alpha", default=0.7_5, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
parser.add_argument(
"--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
)
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(dump_path, safe_serialization=args.to_safetensors)
| 294 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline ( DiffusionPipeline ):
    """Unconditional latent diffusion pipeline: a UNet denoises random latents under a DDIM scheduler and a VQ-VAE decodes them into images."""
def __init__( self, _lowercase, _lowercase, _lowercase ) -> List[Any]:
super().__init__()
self.register_modules(vqvae=_lowercase, unet=_lowercase, scheduler=_lowercase )
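    # __call__ below runs the full sampling loop: draw Gaussian latents, denoise them step by step, decode with the VQ-VAE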
@torch.no_grad()
    def __call__( self, batch_size = 1, generator = None, eta = 0.0, num_inference_steps = 50, output_type = "pil", return_dict = True, **kwargs, ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=generator, )
        latents = latents.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps )

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['eta'] = eta

        for t in self.progress_bar(self.scheduler.timesteps ):
            latent_model_input = self.scheduler.scale_model_input(latents, t )
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs ).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents ).sample

        # map decoded samples from [-1, 1] to [0, 1] before converting to images
        image = (image / 2 + 0.5).clamp(0, 1 )
        image = image.cpu().permute(0, 2, 3, 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
| 294 | 1 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , embedding_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config ( self ):
return MobileBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
def __magic_name__ ( self , _a , _a , _a , _a , _a , _a , _a ):
lowercase : Optional[Any] = MobileBertModel(config=_a )
model.to(_a )
model.eval()
lowercase : Tuple = model(_a , attention_mask=_a , token_type_ids=_a )
lowercase : List[Any] = model(_a , token_type_ids=_a )
lowercase : Tuple = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__ ( self , _a , _a , _a , _a , _a , _a , _a ):
lowercase : List[Any] = MobileBertForMaskedLM(config=_a )
model.to(_a )
model.eval()
lowercase : Tuple = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self , _a , _a , _a , _a , _a , _a , _a ):
lowercase : Optional[int] = MobileBertForNextSentencePrediction(config=_a )
model.to(_a )
model.eval()
lowercase : Any = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self , _a , _a , _a , _a , _a , _a , _a ):
lowercase : str = MobileBertForPreTraining(config=_a )
model.to(_a )
model.eval()
lowercase : Union[str, Any] = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , next_sentence_label=_a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self , _a , _a , _a , _a , _a , _a , _a ):
lowercase : List[str] = MobileBertForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
lowercase : List[str] = model(
_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self , _a , _a , _a , _a , _a , _a , _a ):
lowercase : Optional[Any] = self.num_labels
lowercase : str = MobileBertForSequenceClassification(_a )
model.to(_a )
model.eval()
lowercase : List[Any] = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self , _a , _a , _a , _a , _a , _a , _a ):
lowercase : Tuple = self.num_labels
lowercase : Any = MobileBertForTokenClassification(config=_a )
model.to(_a )
model.eval()
lowercase : int = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self , _a , _a , _a , _a , _a , _a , _a ):
lowercase : List[str] = self.num_choices
lowercase : str = MobileBertForMultipleChoice(config=_a )
model.to(_a )
model.eval()
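        # tile each input across the choice dimension so that every answer choice gets scored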
lowercase : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Union[str, Any] = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class MobileBertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class ( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["""next_sentence_label"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )

        return inputs_dict
    def setUp ( self ):
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
def __magic_name__ ( self ):
self.config_tester.run_common_tests()
def __magic_name__ ( self ):
lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_a )
def __magic_name__ ( self ):
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_a )
def __magic_name__ ( self ):
lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_a )
def __magic_name__ ( self ):
lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_a )
def __magic_name__ ( self ):
lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_a )
def __magic_name__ ( self ):
lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_a )
def __magic_name__ ( self ):
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_a )
def __magic_name__ ( self ):
lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_a )
def _long_tensor ( __snake_case : List[Any] ):
    return torch.tensor(
        __snake_case , dtype=torch.long , device=torch_device , )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests ( unittest.TestCase ):
    @slow
    def test_inference_no_head ( self ):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(torch_device )
        input_ids = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526E07, 8.2691656E04, 1.6521838E05],
                    [-5.7541704E-01, 3.9056022E00, 4.4011507E00],
                    [2.6047359E00, 1.5677652E00, -1.7324188E-01],
                ]
            ] , device=torch_device , )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )

        self.assertTrue(lower_bound and upper_bound )
| 518 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
_A : Dict = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 518 | 1 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
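# Compare the Shannon entropy of single characters with that of character pairs in a text.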
def calculate_prob ( text: str ) -> None:
    """
    Print the one-character Shannon entropy of ``text``, the two-character
    entropy, and the difference between the two.
    """
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(''' ''' + ascii_lowercase )

    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )

    # one length string
    my_fir_sum = 0

    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.

    # print entropy
    print(f"""{round(-1 * my_fir_sum ):.1f}""" )

    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0

    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )

    # print second entropy
    print(f"""{round(-1 * my_sec_sum ):.1f}""" )

    # print the difference between them
    print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def analyze_text ( text: str ) -> tuple[dict, dict]:
    """
    Convert ``text`` into two frequency tables: one for single characters and
    one for two-character sequences.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
    """Run the module doctests; see the commented-out example below for typical usage."""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 252 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
A__ = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class TextQuestionAnsweringToolTester ( unittest.TestCase , ToolTesterMixin ):
    def setUp( self ):
        self.tool = load_tool('''text-question-answering''' )
        self.tool.setup()
        self.remote_tool = load_tool('''text-question-answering''' ,remote=True )

    def test_exact_match_arg( self ):
        result = self.tool(A__ ,'''What did Hugging Face do in April 2021?''' )
        self.assertEqual(result ,'''launched the BigScience Research Workshop''' )

    def test_exact_match_arg_remote( self ):
        result = self.remote_tool(A__ ,'''What did Hugging Face do in April 2021?''' )
        self.assertEqual(result ,'''launched the BigScience Research Workshop''' )

    def test_exact_match_kwarg( self ):
        result = self.tool(text=A__ ,question='''What did Hugging Face do in April 2021?''' )
        self.assertEqual(result ,'''launched the BigScience Research Workshop''' )

    def test_exact_match_kwarg_remote( self ):
        result = self.remote_tool(text=A__ ,question='''What did Hugging Face do in April 2021?''' )
        self.assertEqual(result ,'''launched the BigScience Research Workshop''' )
| 252 | 1 |
import sys
import turtle
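# Draw a Sierpinski triangle of the requested recursion depth using turtle graphics.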
def get_mid ( pa : tuple[float, float] , pb : tuple[float, float] ) -> tuple[float, float]:
    '''Return the midpoint of the segment joining ``pa`` and ``pb``.'''
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


def triangle ( vertexa : tuple[float, float] , vertexb : tuple[float, float] , vertexc : tuple[float, float] , depth : int , ) -> None:
    '''Draw one triangle, then recurse on the three corner sub-triangles.'''
    my_pen.up()
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexb[0] , vertexb[1] )
    my_pen.goto(vertexc[0] , vertexc[1] )
    my_pen.goto(vertexa[0] , vertexa[1] )

    if depth == 0:
        return

    triangle(vertexa , get_mid(vertexa , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
    triangle(vertexb , get_mid(vertexa , vertexb ) , get_mid(vertexb , vertexc ) , depth - 1 )
    triangle(vertexc , get_mid(vertexc , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
    vertices = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 711 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True

    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp ( self ):
        self.model_tester = FlaxRoFormerModelTester(self )
    @slow
    def test_model_from_pretrained ( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxRoFormerModelIntegrationTest ( unittest.TestCase ):
    @slow
    def test_inference_masked_lm ( self ):
        model = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]

        vocab_size = 5_0_0_0_0
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape , expected_shape )

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
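# Usage sketch (hypothetical paths; `fire` maps CLI flags onto save_len_file's signature):
#
#   python save_len_file.py --tokenizer_name t5-small --data_dir ./cnn_dm
#
# The pickled per-example lengths let a length-sorted batch sampler group examples of
# similar size, cutting padding overhead during seq2seq training.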
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionInpaintPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
def UpperCAmelCase__ ( self) -> List[Any]:
torch.manual_seed(0)
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , )
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_)
torch.manual_seed(0)
UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0)
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
UpperCamelCase = CLIPTextModel(lowerCamelCase_)
UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=0) -> Dict:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
UpperCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCamelCase_)).to(lowerCamelCase_)
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCamelCase = Image.fromarray(np.uinta(lowerCamelCase_)).convert('''RGB''').resize((6_4, 6_4))
UpperCamelCase = Image.fromarray(np.uinta(image + 4)).convert('''RGB''').resize((6_4, 6_4))
if str(lowerCamelCase_).startswith('''mps'''):
UpperCamelCase = torch.manual_seed(lowerCamelCase_)
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_).manual_seed(lowerCamelCase_)
UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionInpaintPipeline(**lowerCamelCase_)
UpperCamelCase = sd_pipe.to(lowerCamelCase_)
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_)
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_)
UpperCamelCase = sd_pipe(**lowerCamelCase_).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase__ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase_ , safety_checker=lowerCamelCase_)
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 9e-3
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , torch_dtype=torch.floataa , safety_checker=lowerCamelCase_ , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''np''' , )
UpperCamelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 5e-1
def UpperCAmelCase__ ( self) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''')
UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
UpperCamelCase = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCamelCase = PNDMScheduler.from_pretrained(lowerCamelCase_ , subfolder='''scheduler''')
UpperCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
lowerCamelCase_ , safety_checker=lowerCamelCase_ , scheduler=lowerCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCamelCase_)
pipe.set_progress_bar_config(disable=lowerCamelCase_)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
UpperCamelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCamelCase = torch.manual_seed(0)
UpperCamelCase = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''np''' , )
UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9 | 34 | 0 |
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """
    Return the numerator of the fraction immediately to the left of numerator/denominator
    among all fractions with denominators up to `limit` (Project Euler problem 71).
    """
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        # largest numerator with current_numerator / current_denominator <= numerator / denominator
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            # exact multiple would equal numerator/denominator, so step one below it
            current_numerator -= 1
        # compare fractions via cross-multiplication to avoid floating point error
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1000000))
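# Worked example (an addition for illustration, not part of the original solution): on a
# tiny search space the answer is checkable by hand. For limit=8, the fractions below 3/7
# include 1/3, 3/8 and 2/5; 2/5 = 0.4 is the largest, so the expected numerator is 2.
assert solution(numerator=3, denominator=7, limit=8) == 2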
from math import pow, sqrt


def validate(*values: float) -> bool:
    # Graham's law is only meaningful for strictly positive rates and molar masses.
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
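# Example (approximate molar masses in g/mol, chosen for illustration): Graham's law gives
# r_H2 / r_O2 = sqrt(M_O2 / M_H2) = sqrt(32 / 2.016) ~= 3.98, i.e. hydrogen effuses about
# four times as fast as oxygen.
if __name__ == "__main__":
    print(effusion_ratio(2.016, 32.0))  # ~3.984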
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
a : List[str] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
a : Optional[int] = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print("""\n""".join(upper_files) + """\n""")
a : List[Any] = [file for file in filepaths if """ """ in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print("""\n""".join(space_files) + """\n""")
a : Optional[int] = [file for file in filepaths if """-""" in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print("""\n""".join(hyphen_files) + """\n""")
a : List[str] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print("""\n""".join(nodir_files) + """\n""")
a : Tuple = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
"""small""",
"""small-base""",
"""medium""",
"""medium-base""",
"""intermediate""",
"""intermediate-base""",
"""large""",
"""large-base""",
"""xlarge""",
"""xlarge-base""",
]
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""",
"""funnel-transformer/small-base""": (
"""https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""",
"""funnel-transformer/large-base""": (
"""https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}


class FunnelTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" Funnel Transformer tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>",
        pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>",
        clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            bos_token=bos_token, eos_token=eos_token, clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
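# Illustration (not part of the original module): Funnel assigns the [CLS] slot its own
# token type id (cls_token_type_id == 2) rather than 0. Tracing
# create_token_type_ids_from_sequences for a 3-token sequence A and a 2-token sequence B:
#
#   cls + A + sep -> [2] + [0, 0, 0, 0]
#   B + sep       -> [1, 1, 1]
#
# giving [2, 0, 0, 0, 0, 1, 1, 1] overall.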
def solution(n: int = 100) -> int:
    """
    Project Euler problem 6: return the difference between the square of the sum and
    the sum of the squares of the first n natural numbers.
    """
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
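# Closed-form cross-check (an addition, not part of the original solution): both sums
# have well-known formulas, sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6, so the
# answer can also be computed without a loop.
def solution_closed_form(n: int = 100) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares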
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
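# Shape sketch (illustrative sizes, not tied to any real checkpoint): a rank-4 LoRA pair
# for a 320x320 linear layer stores an up factor (320, 4) and a down factor (4, 320); the
# merged update alpha * (up @ down) recovers the full (320, 320) shape of the frozen
# weight it is added to, exactly as in the 2D branch of convert() above.
def _lora_update_shape_example():
    up, down = torch.randn(320, 4), torch.randn(4, 320)
    delta = 0.75 * torch.mm(up, down)
    assert delta.shape == (320, 320)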
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
F"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
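# Shape note (DETR uses a hidden size of 256): each fused in_proj matrix is (768, 256) and
# the fused bias is (768,), so rows 0:256, 256:512 and 512:768 hold the query, key and
# value projections respectively -- which is exactly how read_in_q_k_v() slices them above.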
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the original DETR weights into our DETR structure.
    """
    # load default config
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """
    Copy/paste/tweak fairseq RoBERTa's weights into our XLM-RoBERTa-XL structure.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
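# Possible refinement (a sketch, not in the original script): the hard-coded .jpg extension
# can be wrong for PNG/WebP sources; the response's Content-Type header could drive it:
#
#   response = requests.get(image_url)
#   extension = response.headers.get("Content-Type", "image/jpeg").split("/")[-1]
#   file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.{extension}"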
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """
    The variance preserving stochastic differential equation (SDE) scheduler.
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
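# Minimal sampling sketch (illustrative only; `score_model` stands in for any network that
# predicts the score of the noised sample at continuous time t):
#
#   scheduler = ScoreSdeVpScheduler()
#   scheduler.set_timesteps(num_inference_steps=1000)
#   x = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       score = score_model(x, t)
#       x, x_mean = scheduler.step_pred(score, x, t)
#   # x_mean holds the final denoised estimate (no noise is added to it).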
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
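# Behavior note (the standard _LazyModule pattern, not specific to DPT): at runtime the
# module object is swapped for a _LazyModule, so `from transformers import DPTModel` only
# triggers the heavy torch-dependent import when the attribute is first accessed.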
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowercase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline
SCREAMING_SNAKE_CASE__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
SCREAMING_SNAKE_CASE__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE__ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __magic_name__( self :int ) -> Optional[int]:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__SCREAMING_SNAKE_CASE : str = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any]=0 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__SCREAMING_SNAKE_CASE : List[Any] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert('''RGB''' )
if str(lowerCAmelCase__ ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Union[str, Any] ) -> str:
__SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : int = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = '''french fries'''
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = output.images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Dict:
__SCREAMING_SNAKE_CASE : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = [inputs['''prompt''']] * 2
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0
__SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = image / 2 + 0.5
__SCREAMING_SNAKE_CASE : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
__SCREAMING_SNAKE_CASE : Any = image.repeat(2 , 1 , 1 , 1 )
__SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__SCREAMING_SNAKE_CASE : Tuple = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Union[str, Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = sd_pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : List[str] = [round(lowerCAmelCase__ , 4 ) for x in image_slice.flatten().tolist()]
        print(",".join(str(x) for x in slice))
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __magic_name__( self :Tuple ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __magic_name__( self :str ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = VaeImageProcessor(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) )[0]
__SCREAMING_SNAKE_CASE : Union[str, Any] = components['''vae''']
__SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__SCREAMING_SNAKE_CASE : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()
__SCREAMING_SNAKE_CASE : Dict = pipe(**lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : List[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase__ , 1E-4 , '''passing latents as image input generate different result from passing image''' )
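    # The assertion above feeds VAE-encoded latents in place of the raw image and
    # requires near-identical output (max abs diff < 1e-4): the pipeline should
    # treat pre-encoded latents and raw images interchangeably.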
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Union[str, Any] ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self :int , lowerCAmelCase__ :Dict=0 ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
__SCREAMING_SNAKE_CASE : Dict = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def __magic_name__( self :Dict ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : str = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = self.get_inputs()
__SCREAMING_SNAKE_CASE : int = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : Dict = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Optional[int] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : str = self.get_inputs()
__SCREAMING_SNAKE_CASE : Optional[int] = pipe(**lowerCAmelCase__ ).images
__SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __magic_name__( self :Dict ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = 0
def callback_fn(lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor ) -> None:
__SCREAMING_SNAKE_CASE : Dict = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__SCREAMING_SNAKE_CASE : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__SCREAMING_SNAKE_CASE : Tuple = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : str = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__SCREAMING_SNAKE_CASE : Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__SCREAMING_SNAKE_CASE : List[str] = latents[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : str = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.float16 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __magic_name__( self :List[str] ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__SCREAMING_SNAKE_CASE : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.float16 )
__SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
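        # enable_sequential_cpu_offload keeps weights on the CPU and moves each
        # submodule to the GPU only for its forward pass, trading speed for the
        # low peak memory asserted below.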
__SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
__SCREAMING_SNAKE_CASE : List[Any] = pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __magic_name__( self :int ) -> Tuple:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__SCREAMING_SNAKE_CASE : int = inputs['''image'''].resize((504, 504) )
__SCREAMING_SNAKE_CASE : Optional[int] = '''timbrooks/instruct-pix2pix'''
__SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Any = pipe(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = output.images[0]
__SCREAMING_SNAKE_CASE : str = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
__SCREAMING_SNAKE_CASE : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 696 | 1 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix):
    """Return the inverse of a 2x2 or 3x3 matrix, computed with Decimal precision."""
    d = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
# Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
        len(matrix) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
        cofactor_matrix = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
# Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Return the inverse as plain floats
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
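# A minimal usage sketch (illustrative values, not from the original module):
if __name__ == "__main__":
    print(inverse_of_matrix([[2.0, 5.0], [2.0, 0.0]]))  # [[0.0, 0.5], [0.2, -0.2]]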
| 700 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__snake_case : Optional[Any] = '\\n\n'
__snake_case : List[Any] = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
__snake_case : Tuple = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    '''Perplexity of input texts under a causal language model.'''
    def _info(self) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
    def _compute(self, input_texts, model_id, batch_size=16, add_start_token=True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either cpu, cuda, or gpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
# check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts, add_special_tokens=False, padding=True, truncation=True,
            max_length=max_tokenized_len, return_tensors="pt", return_attention_mask=True,
        ).to(device)
        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")
        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1)
            labels = encoded_batch
with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1))
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 433 | 0 |
'''simple docstring'''
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)
from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def test_base_extractors(
    compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file,
    tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file,
):
    """Each base extractor should detect and extract its own format."""
    input_paths_and_base_extractors = {
        '7z': (seven_zip_file, SevenZipExtractor),
        'bz2': (bz2_file, Bzip2Extractor),
        'gzip': (gz_file, GzipExtractor),
        'lz4': (lz4_file, Lz4Extractor),
        'tar': (tar_file, TarExtractor),
        'xz': (xz_file, XzExtractor),
        'zip': (zip_file, ZipExtractor),
        'zstd': (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f'for \'{compression_format}\' compression_format, '
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ('extracted' if is_archive else 'extracted.txt')
    base_extractor.extract(input_path, output_path)
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding='utf-8')
    else:
        extracted_file_content = output_path.read_text(encoding='utf-8')
    expected_file_content = text_file.read_text(encoding='utf-8')
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def test_extractor(
    compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file,
    tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file,
):
    """The generic Extractor should infer the format and extract each file."""
    input_paths = {
        '7z': seven_zip_file,
        'bz2': bz2_file,
        'gzip': gz_file,
        'lz4': lz4_file,
        'tar': tar_file,
        'xz': xz_file,
        'zip': zip_file,
        'zstd': zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f'for \'{compression_format}\' compression_format, '
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ('extracted' if is_archive else 'extracted.txt')
    Extractor.extract(input_path, output_path, extractor_format)
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding='utf-8')
    else:
        extracted_file_content = output_path.read_text(encoding='utf-8')
    expected_file_content = text_file.read_text(encoding='utf-8')
assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    """Create a tar archive whose member path escapes the target via '..'."""
    import tarfile
    directory = tmp_path / 'data_dot_dot'
    directory.mkdir()
    path = directory / 'tar_file_with_dot_dot.tar'
    with tarfile.TarFile(path, 'w') as f:
        f.add(text_file, arcname=os.path.join('..', text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    """Create a tar archive containing a symlink that points outside the archive."""
    import tarfile
    directory = tmp_path / 'data_sym_link'
    directory.mkdir()
    path = directory / 'tar_file_with_sym_link.tar'
    os.symlink('..', directory / 'subdir', target_is_directory=True)
    with tarfile.TarFile(path, 'w') as f:
        f.add(str(directory / 'subdir'), arcname='subdir')  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'insecure_tar_file, error_log' , [('tar_file_with_dot_dot', 'illegal path'), ('tar_file_with_sym_link', 'Symlink')] , )
def test_extract_insecure_tar_files(insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog):
    """Insecure tar members must be refused and logged as errors."""
    insecure_tar_files = {
        'tar_file_with_dot_dot': tar_file_with_dot_dot,
        'tar_file_with_sym_link': tar_file_with_sym_link,
    }
    path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / 'extracted'
    TarExtractor.extract(path, output_path)
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    """ZipExtractor should reject files that merely embed ZIP magic bytes."""
    # We should have fewer false positives than zipfile.is_zipfile,
    # which we achieve by checking only the magic number.
    not_a_zip_file = tmpdir / 'not_a_zip_file'
    # From: https://github.com/python/cpython/pull/5053
    contents = (
B'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'
B'\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'
B'DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'
B'\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'
)
with not_a_zip_file.open('wb' ) as f:
        f.write(contents)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
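# A minimal sketch of the magic-number idea exercised above (assumes real ZIP
# archives start with the local-file-header signature; empty archives differ):
def _looks_like_zip(path) -> bool:
    with open(path, 'rb') as f:
        return f.read(4) == b'PK\x03\x04'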
| 56 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"
    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
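# A short usage sketch (values shown are the defaults defined above):
#
#     config = TimesformerConfig(num_frames=16)
#     assert config.attention_type == "divided_space_time"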
| 56 | 1 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config(self):
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE : Tuple = True
return config, input_ids, input_mask, token_labels
def SCREAMING_SNAKE_CASE_ ( self :Any , _lowerCamelCase :Tuple , _lowerCamelCase :Optional[int] , _lowerCamelCase :str ):
__SCREAMING_SNAKE_CASE : Dict = GPTNeoXModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE : Dict = model(_lowerCamelCase , attention_mask=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self :int , _lowerCamelCase :str , _lowerCamelCase :Optional[int] , _lowerCamelCase :Tuple ):
__SCREAMING_SNAKE_CASE : Any = True
__SCREAMING_SNAKE_CASE : Union[str, Any] = GPTNeoXModel(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] , _lowerCamelCase :Optional[int] , _lowerCamelCase :int , _lowerCamelCase :Any , _lowerCamelCase :List[str] ):
__SCREAMING_SNAKE_CASE : str = GPTNeoXForCausalLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self :Any , _lowerCamelCase :List[Any] , _lowerCamelCase :Tuple , _lowerCamelCase :Any , _lowerCamelCase :Union[str, Any] ):
__SCREAMING_SNAKE_CASE : Any = self.num_labels
__SCREAMING_SNAKE_CASE : List[Any] = GPTNeoXForQuestionAnswering(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE : str = model(_lowerCamelCase , attention_mask=_lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE_ ( self :int , _lowerCamelCase :Union[str, Any] , _lowerCamelCase :Union[str, Any] , _lowerCamelCase :Optional[Any] , _lowerCamelCase :Dict ):
__SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
__SCREAMING_SNAKE_CASE : Any = GPTNeoXForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : str = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] , _lowerCamelCase :str , _lowerCamelCase :Dict , _lowerCamelCase :List[str] , _lowerCamelCase :Optional[Any] ):
__SCREAMING_SNAKE_CASE : Any = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[Any] = GPTNeoXForTokenClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] , _lowerCamelCase :Tuple , _lowerCamelCase :str , _lowerCamelCase :Union[str, Any] ):
__SCREAMING_SNAKE_CASE : List[Any] = True
__SCREAMING_SNAKE_CASE : Any = GPTNeoXForCausalLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
# first forward pass
__SCREAMING_SNAKE_CASE : str = model(_lowerCamelCase , attention_mask=_lowerCamelCase , use_cache=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : int = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__SCREAMING_SNAKE_CASE : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
__SCREAMING_SNAKE_CASE : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__SCREAMING_SNAKE_CASE : str = torch.cat([input_ids, next_tokens] , dim=-1 )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
__SCREAMING_SNAKE_CASE : Tuple = model(_lowerCamelCase , attention_mask=_lowerCamelCase , output_hidden_states=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : str = output_from_no_past['''hidden_states'''][0]
__SCREAMING_SNAKE_CASE : Optional[Any] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase , output_hidden_states=_lowerCamelCase , )['''hidden_states'''][0]
# select random slice
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__SCREAMING_SNAKE_CASE : int = output_from_no_past[:, -3:, random_slice_idx].detach()
__SCREAMING_SNAKE_CASE : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE_ ( self :int ):
__SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
__SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'''feature-extraction''': GPTNeoXModel,
'''question-answering''': GPTNeoXForQuestionAnswering,
'''text-classification''': GPTNeoXForSequenceClassification,
'''text-generation''': GPTNeoXForCausalLM,
'''token-classification''': GPTNeoXForTokenClassification,
'''zero-shot''': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self :str ):
# This regression test was failing with PyTorch < 1.3
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
__SCREAMING_SNAKE_CASE : Optional[int] = None
self.model_tester.create_and_check_model_as_decoder(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self :int ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
__SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
__SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self :int ):
__SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )
@unittest.skip(reason='''Feed forward chunking is not implemented''' )
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] , _lowerCamelCase :Any ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([1, 1_0] , config.vocab_size )
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
__SCREAMING_SNAKE_CASE : List[Any] = GPTNeoXModel(_lowerCamelCase )
original_model.to(_lowerCamelCase )
original_model.eval()
__SCREAMING_SNAKE_CASE : str = original_model(_lowerCamelCase ).last_hidden_state
__SCREAMING_SNAKE_CASE : List[str] = original_model(_lowerCamelCase ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
__SCREAMING_SNAKE_CASE : List[Any] = {'''type''': scaling_type, '''factor''': 1_0.0}
__SCREAMING_SNAKE_CASE : int = GPTNeoXModel(_lowerCamelCase )
scaled_model.to(_lowerCamelCase )
scaled_model.eval()
__SCREAMING_SNAKE_CASE : List[str] = scaled_model(_lowerCamelCase ).last_hidden_state
__SCREAMING_SNAKE_CASE : List[str] = scaled_model(_lowerCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-5 ) )
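    # Background for the parameterized cases above: "linear" scaling rescales every
    # position, so even short inputs drift from the unscaled model, while "dynamic"
    # NTK scaling only activates beyond the original maximum sequence length.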
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
@slow
    def test_pythia_410m_deduped_generation(self):
__SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
for checkpointing in [True, False]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(_lowerCamelCase )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
__SCREAMING_SNAKE_CASE : List[Any] = '''My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate(**_lowerCamelCase , do_sample=_lowerCamelCase , max_new_tokens=2_0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase )[0]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
| 401 |
"""simple docstring"""
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    '''Sum of an arithmetic series: (n / 2) * (2a + (n - 1) * d).'''
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
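# Worked example: sum_of_series(1, 1, 10) sums 1 + 2 + ... + 10 as
# (10 / 2) * (2 * 1 + 9 * 1) = 55.0.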
def main() -> None:
    '''Run a small demonstration of sum_of_series.'''
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 401 | 1 |
import argparse
import os
import re
__UpperCamelCase: Any = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__UpperCamelCase: Dict = re.compile(r"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
__UpperCamelCase: List[str] = re.compile(r"""\s*\(\s*\"(\S[^\"]+)\"""")
def sort_auto_mapping(fname: str, overwrite: bool = False):
    """Sort the mappings in one auto module file; return True if a rewrite is needed."""
    with open(fname, 'r', encoding='utf-8') as f:
        content = f.read()
    lines = content.split('\n')
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(r'^(\s*)\S', lines[line_idx]).groups()[0]) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(' ' * indent + '(' ):
new_lines.append(lines[line_idx] )
line_idx += 1
            blocks = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
                    start_idx = line_idx
while not lines[line_idx].startswith(' ' * indent + ')' ):
line_idx += 1
blocks.append('\n'.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
        with open(fname, 'w', encoding='utf-8') as f:
            f.write('\n'.join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    """Sort every file under PATH_TO_AUTO_MODULE; raise when check-only finds unsorted mappings."""
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith('.py')]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]
    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"""The following files have auto mappings that need sorting: {", ".join(failures)}. Run `make style` to fix"""
' this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 266 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
'''simple docstring'''
    model_class = PriorTransformer
    main_input_name = "hidden_states"
@property
    def dummy_input(self):
lowercase__ : List[Any] = 4
lowercase__ : Optional[int] = 8
lowercase__ : List[str] = 7
lowercase__ : Optional[Any] = floats_tensor((batch_size, embedding_dim) ).to(lowerCamelCase_ )
lowercase__ : Union[str, Any] = floats_tensor((batch_size, embedding_dim) ).to(lowerCamelCase_ )
lowercase__ : Any = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCamelCase_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def get_dummy_seed_input(self, seed: int = 0):
        torch.manual_seed(seed)
lowercase__ : Tuple = 4
lowercase__ : List[Any] = 8
lowercase__ : List[str] = 7
lowercase__ : Union[str, Any] = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase_ )
lowercase__ : Optional[int] = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase_ )
lowercase__ : List[str] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCamelCase_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
    def input_shape(self):
return (4, 8)
@property
    def output_shape(self):
return (4, 8)
    def prepare_init_args_and_inputs_for_common(self):
lowercase__ : int = {
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
lowercase__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            'hf-internal-testing/prior-dummy', output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['missing_keys']), 0)
        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]
assert hidden_states is not None, "Make sure output is not None"
    def test_forward_signature(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ['hidden_states', 'timestep']
        self.assertListEqual(arg_names[:2], expected_arg_names)
    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy')
        model = model.to(torch_device)
        if hasattr(model, 'set_default_attn_processor'):
            model.set_default_attn_processor()
        inputs = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**inputs)[0]
        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)
        # Since the prior's random latents are seeded on the executing device,
        # the expected output slice differs between CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1E-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
lowercase__ : int = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase_ )
lowercase__ : str = torch.randn((batch_size, embedding_dim) ).to(lowerCamelCase_ )
lowercase__ : List[str] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCamelCase_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_8_6_1, 0.1_2_8_3, -0.0_9_3_1, 0.0_8_8_2, 0.4_4_7_6, 0.1_3_2_9, -0.0_4_9_8, 0.0_6_4_0]],
[37, [-0.4_9_1_3, 0.0_1_1_0, -0.0_4_8_3, 0.0_5_4_1, 0.4_9_5_4, -0.0_1_7_0, 0.0_3_5_4, 0.1_6_5_1]],
# fmt: on
] )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior', subfolder='prior')
        model.to(torch_device)
        inputs = self.get_dummy_seed_input(seed=seed)
        with torch.no_grad():
            sample = model(**inputs)[0]
        assert list(sample.shape) == [1, 768]
        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1E-3)
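        # The parameterized cases above pin reference slices for two RNG seeds; the
        # loose atol=1e-3 guards against small cross-device numerical differences.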
| 266 | 1 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
lowerCAmelCase_ = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    '''Detect whether SageMaker model parallelism is configured and importable.'''
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    """TrainingArguments variant with SageMaker-specific device setup (deprecated)."""
    mp_parameters: str = field(
        default="", metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"}, )
    def __post_init__(self):
'''simple docstring'''
super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.", FutureWarning, )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> "torch.device":
'''simple docstring'''
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
if self.no_cuda:
UpperCamelCase : Dict = torch.device("cpu" )
UpperCamelCase : str = 0
elif is_sagemaker_model_parallel_available():
UpperCamelCase : List[str] = smp.local_rank()
UpperCamelCase : Any = torch.device("cuda" , lowerCamelCase )
UpperCamelCase : Optional[Any] = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
UpperCamelCase : Any = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
UpperCamelCase : Tuple = torch.device("cuda" , self.local_rank )
UpperCamelCase : Optional[Any] = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
UpperCamelCase : List[Any] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
            self._n_gpu = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
UpperCamelCase : Tuple = torch.device("cuda" , self.local_rank )
UpperCamelCase : List[str] = 1
if device.type == "cuda":
            torch.cuda.set_device(device)
return device
@property
    def world_size(self):
'''simple docstring'''
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
    def place_model_on_device(self):
'''simple docstring'''
return not is_sagemaker_model_parallel_available()
@property
    def _no_sync_in_gradient_accumulation(self):
'''simple docstring'''
return False
| 714 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    '''Check whether a number is a perfect square.'''
    sq = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    '''Add the fractions x_num/x_den + y_num/y_den + z_num/z_den, fully reduced.'''
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
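# Worked example: add_three(1, 2, 1, 3, 1, 6) reduces 1/2 + 1/3 + 1/6 to (1, 1).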
def solution(order: int = 35) -> int:
    '''Return numerator + denominator of the total over all unique fraction sums.'''
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
for x_num in range(1 , order + 1):
for x_den in range(x_num + 1 , order + 1):
for y_num in range(1 , order + 1):
for y_den in range(y_num + 1 , order + 1):
# n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
# n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
# n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase : List[str] = add_three(
A , A , A , A , A , A)
unique_s.add(A)
# n=2
UpperCamelCase : int = x_num * x_num * y_num * y_num
UpperCamelCase : Any = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(A) and is_sq(A):
UpperCamelCase : Optional[int] = int(sqrt(A))
UpperCamelCase : Union[str, Any] = int(sqrt(A))
UpperCamelCase : str = gcd(A , A)
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCamelCase : Union[str, Any] = add_three(
A , A , A , A , A , A)
unique_s.add(A)
for num, den in unique_s:
total += Fraction(A , A)
return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
| 435 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
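# A minimal, hedged sketch of the lazy-import idea used above (my toy version, not the
# transformers `_LazyModule`): attributes are resolved and cached on first access instead
# of importing every backend-specific submodule up front.
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # {submodule name: [attribute, ...]} -> reverse map for attribute lookup
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value

# Hypothetical usage: `json` is only imported once `dumps` is first touched.
lazy = _TinyLazyModule("lazy_json", {"json": ["dumps"]})
print(lazy.dumps({"lazy": True}))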
| 267 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class snake_case__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" )
# Using `do_sample=False` to force deterministic output
UpperCAmelCase : int = text_generator("This is a test" , do_sample=lowercase )
self.assertEqual(
lowercase , [
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
] , )
UpperCAmelCase : List[Any] = text_generator(["This is a test", "This is a second test"] )
self.assertEqual(
lowercase , [
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
] , )
UpperCAmelCase : Any = text_generator("This is a test" , do_sample=lowercase , num_return_sequences=2 , return_tensors=lowercase )
self.assertEqual(
lowercase , [
{"generated_token_ids": ANY(lowercase )},
{"generated_token_ids": ANY(lowercase )},
] , )
UpperCAmelCase : Dict = text_generator.model.config.eos_token_id
UpperCAmelCase : List[str] = "<pad>"
UpperCAmelCase : List[str] = text_generator(
["This is a test", "This is a second test"] , do_sample=lowercase , num_return_sequences=2 , batch_size=2 , return_tensors=lowercase , )
self.assertEqual(
lowercase , [
[
{"generated_token_ids": ANY(lowercase )},
{"generated_token_ids": ANY(lowercase )},
],
[
{"generated_token_ids": ANY(lowercase )},
{"generated_token_ids": ANY(lowercase )},
],
] , )
@require_tf
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase : Tuple = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" )
# Using `do_sample=False` to force deterministic output
UpperCAmelCase : Union[str, Any] = text_generator("This is a test" , do_sample=lowercase )
self.assertEqual(
lowercase , [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
] , )
UpperCAmelCase : List[str] = text_generator(["This is a test", "This is a second test"] , do_sample=lowercase )
self.assertEqual(
lowercase , [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
] , )
def __lowerCAmelCase ( self : str , lowercase : str , lowercase : str , lowercase : str ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = TextGenerationPipeline(model=lowercase , tokenizer=lowercase )
return text_generator, ["This is a test", "Another test"]
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase : Tuple = "Hello I believe in"
UpperCAmelCase : Dict = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
UpperCAmelCase : Union[str, Any] = text_generator(lowercase )
self.assertEqual(
lowercase , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , )
UpperCAmelCase : Optional[int] = text_generator(lowercase , stop_sequence=" fe" )
self.assertEqual(lowercase , [{"generated_text": "Hello I believe in fe"}] )
def __lowerCAmelCase ( self : str , lowercase : int , lowercase : str ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = text_generator.model
UpperCAmelCase : Tuple = text_generator.tokenizer
UpperCAmelCase : Tuple = text_generator("This is a test" )
self.assertEqual(lowercase , [{"generated_text": ANY(lowercase )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
UpperCAmelCase : int = text_generator("This is a test" , return_full_text=lowercase )
self.assertEqual(lowercase , [{"generated_text": ANY(lowercase )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
UpperCAmelCase : Tuple = pipeline(task="text-generation" , model=lowercase , tokenizer=lowercase , return_full_text=lowercase )
UpperCAmelCase : Any = text_generator("This is a test" )
self.assertEqual(lowercase , [{"generated_text": ANY(lowercase )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
UpperCAmelCase : int = text_generator("This is a test" , return_full_text=lowercase )
self.assertEqual(lowercase , [{"generated_text": ANY(lowercase )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
UpperCAmelCase : Union[str, Any] = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=lowercase )
self.assertEqual(
lowercase , [
[{"generated_text": ANY(lowercase )}, {"generated_text": ANY(lowercase )}],
[{"generated_text": ANY(lowercase )}, {"generated_text": ANY(lowercase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
UpperCAmelCase : int = text_generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=lowercase )
self.assertEqual(
lowercase , [
[{"generated_text": ANY(lowercase )}, {"generated_text": ANY(lowercase )}],
[{"generated_text": ANY(lowercase )}, {"generated_text": ANY(lowercase )}],
] , )
with self.assertRaises(lowercase ):
UpperCAmelCase : Optional[int] = text_generator("test" , return_full_text=lowercase , return_text=lowercase )
with self.assertRaises(lowercase ):
UpperCAmelCase : Tuple = text_generator("test" , return_full_text=lowercase , return_tensors=lowercase )
with self.assertRaises(lowercase ):
UpperCAmelCase : List[Any] = text_generator("test" , return_text=lowercase , return_tensors=lowercase )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
UpperCAmelCase : List[str] = text_generator("" )
self.assertEqual(lowercase , [{"generated_text": ANY(lowercase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
UpperCAmelCase : Dict = text_generator("" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
UpperCAmelCase : Union[str, Any] = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 1_00_00
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("This is a test" * 5_00 , max_new_tokens=20 )
UpperCAmelCase : Tuple = text_generator("This is a test" * 5_00 , handle_long_generation="hole" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(lowercase ):
text_generator(
"This is a test" * 5_00 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
import torch
# Classic `model_kwargs`
UpperCAmelCase : List[Any] = pipeline(
model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
UpperCAmelCase : Union[str, Any] = pipe("This is a test" )
self.assertEqual(
lowercase , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        UpperCAmelCase : List[Any] = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloat16 )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
UpperCAmelCase : Optional[int] = pipe("This is a test" )
self.assertEqual(
lowercase , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
UpperCAmelCase : List[str] = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
UpperCAmelCase : List[Any] = pipe("This is a test" )
self.assertEqual(
lowercase , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
@require_torch
@require_torch_gpu
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
import torch
        UpperCAmelCase : Tuple = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.float16 )
pipe("This is a test" )
@require_torch
@require_accelerate
@require_torch_gpu
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
import torch
        UpperCAmelCase : Any = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.float16 )
pipe("This is a test" , do_sample=lowercase , top_p=0.5 )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = "Hello world"
UpperCAmelCase : Optional[int] = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
if text_generator.model.framework == "tf":
UpperCAmelCase : Optional[int] = logging.get_logger("transformers.generation.tf_utils" )
else:
UpperCAmelCase : List[str] = logging.get_logger("transformers.generation.utils" )
UpperCAmelCase : List[Any] = "Both `max_new_tokens`" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(lowercase ) as cl:
UpperCAmelCase : str = text_generator(lowercase , max_length=10 , max_new_tokens=1 )
self.assertIn(lowercase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(lowercase ) as cl:
UpperCAmelCase : List[str] = text_generator(lowercase , max_new_tokens=1 )
self.assertNotIn(lowercase , cl.out )
with CaptureLogger(lowercase ) as cl:
UpperCAmelCase : int = text_generator(lowercase , max_length=10 )
self.assertNotIn(lowercase , cl.out )
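# Hedged usage sketch of the API these tests exercise (guarded so it only runs when this
# file is executed directly; it downloads the tiny test checkpoint from the Hub).
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
    outputs = generator("This is a test", do_sample=False, max_new_tokens=5)
    print(outputs[0]["generated_text"])  # one dict per prompt, holding the full generated string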
| 595 | 0 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 709 |
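# Standalone, hedged illustration (my helper, not from the source above) of the single
# Quine-McCluskey merging step: two implicants combine exactly when they differ in one
# bit position, and the differing bit becomes a don't-care "_".
def merge_if_adjacent(implicant_a: str, implicant_b: str):
    diff = [i for i in range(len(implicant_a)) if implicant_a[i] != implicant_b[i]]
    if len(diff) != 1:
        return None  # identical or differing in 2+ positions: no merge
    i = diff[0]
    return implicant_a[:i] + "_" + implicant_a[i + 1:]

assert merge_if_adjacent("0110", "0111") == "011_"
assert merge_if_adjacent("0110", "1001") is None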
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class AlignTextConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'align_text_model'
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type" ) == "align":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class AlignVisionConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'align_vision_model'
    def __init__( self , num_channels = 3 , image_size = 6_0_0 , width_coefficient = 2.0 , depth_coefficient = 3.1 , depth_divisor = 8 , kernel_sizes = [3, 3, 5, 3, 5, 5, 3] , in_channels = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , out_channels = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , depthwise_padding = [] , strides = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats = [1, 2, 2, 3, 3, 4, 1] , expand_ratios = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio = 0.25 , hidden_act = "swish" , hidden_dim = 2_5_6_0 , pooling_type = "mean" , initializer_range = 0.02 , batch_norm_eps = 0.001 , batch_norm_momentum = 0.99 , drop_connect_rate = 0.2 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type" ) == "align":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class AlignConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'align'
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=6_4_0 , temperature_init_value=1.0 , initializer_range=0.02 , **kwargs , ):
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." )
        self.text_config = AlignTextConfig(**text_config )
        self.vision_config = AlignVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
    @classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 381 | 0 |
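# Hedged usage sketch for the ALIGN config classes defined above (the values are
# illustrative, not taken from the source): sub-configs compose into one AlignConfig
# and round-trip through to_dict().
text_cfg = AlignTextConfig(vocab_size=100, hidden_size=32)
vision_cfg = AlignVisionConfig(image_size=224)
cfg = AlignConfig.from_text_vision_configs(text_cfg, vision_cfg)
print(cfg.to_dict()["text_config"]["hidden_size"], cfg.to_dict()["model_type"])  # 32 align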
'''simple docstring'''
def solution(limit: int = 100_0000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d; also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(f'''{solution() = }''')
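    # Hedged cross-check (my helper, not part of the source): per the Project Euler 135
    # statement, n = 27 is the least n with exactly two solutions of x^2 - y^2 - z^2 = n
    # where x, y, z are in arithmetic progression (x = z + 2*d, y = z + d).
    def brute_force_count(n: int, search_limit: int = 100) -> int:
        return sum(
            1
            for z in range(1, search_limit)
            for d in range(1, search_limit)
            if (z + 2 * d) ** 2 - (z + d) ** 2 - z * z == n
        )

    assert brute_force_count(27) == 2  # 34^2 - 27^2 - 20^2 == 12^2 - 9^2 - 6^2 == 27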
| 588 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_SCREAMING_SNAKE_CASE : List[Any] = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """simple docstring"""
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """simple docstring"""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    """simple docstring"""
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
| 436 | 0 |
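# For contrast with the multiprocessing version above, a hedged single-process odd-even
# transposition sort (my addition): the same alternating compare-swap schedule, without
# processes, locks, or pipes.
def odd_even_transposition_serial(arr: list) -> list:
    arr = list(arr)
    for phase in range(len(arr)):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_transposition_serial([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]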
'''simple docstring'''
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
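    # Hedged usage example (assumes the reconstruction above): LSD radix sort over
    # non-negative integers, one bucket pass per decimal digit.
    print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))  # [2, 24, 45, 66, 75, 90, 170, 802]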
| 432 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCAmelCase : Any = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def UpperCAmelCase_ ( self , A_ )-> Any:
'''simple docstring'''
if isinstance(A_ , A_ ):
UpperCamelCase = [label.strip() for label in labels.split(',' ) if label.strip()]
return labels
def __call__( self , A_ , A_ , A_ )-> List[str]:
'''simple docstring'''
if len(A_ ) == 0 or len(A_ ) == 0:
raise ValueError('You must include at least one label and at least one sequence.' )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
'Make sure the passed template includes formatting syntax such as {{}} where the label should go.'
).format(A_ ) )
if isinstance(A_ , A_ ):
UpperCamelCase = [sequences]
UpperCamelCase = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(A_ )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(snake_case_)
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , A_=ZeroShotClassificationArgumentHandler() , *A_ , **A_ )-> Tuple:
'''simple docstring'''
UpperCamelCase = args_parser
super().__init__(*A_ , **A_ )
if self.entailment_id == -1:
logger.warning(
'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '
'-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.' )
@property
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith('entail' ):
return ind
return -1
def UpperCAmelCase_ ( self , A_ , A_=True , A_=True , A_=TruncationStrategy.ONLY_FIRST , **A_ )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
'Tokenizer was not supporting padding necessary for zero-shot, attempting to use '
' `pad_token=eos_token`' )
UpperCamelCase = self.tokenizer.eos_token
try:
UpperCamelCase = self.tokenizer(
A_ , add_special_tokens=A_ , return_tensors=A_ , padding=A_ , truncation=A_ , )
except Exception as e:
if "too short" in str(A_ ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
                # It seems there's not really a better way to catch that
# exception.
UpperCamelCase = self.tokenizer(
A_ , add_special_tokens=A_ , return_tensors=A_ , padding=A_ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def UpperCAmelCase_ ( self , **A_ )-> str:
'''simple docstring'''
if kwargs.get('multi_class' , A_ ) is not None:
UpperCamelCase = kwargs['multi_class']
logger.warning(
'The `multi_class` argument has been deprecated and renamed to `multi_label`. '
'`multi_class` will be removed in a future version of Transformers.' )
UpperCamelCase = {}
if "candidate_labels" in kwargs:
UpperCamelCase = self._args_parser._parse_labels(kwargs['candidate_labels'] )
if "hypothesis_template" in kwargs:
UpperCamelCase = kwargs['hypothesis_template']
UpperCamelCase = {}
if "multi_label" in kwargs:
UpperCamelCase = kwargs['multi_label']
return preprocess_params, {}, postprocess_params
def __call__( self , A_ , *A_ , **A_ , )-> int:
'''simple docstring'''
if len(A_ ) == 0:
pass
elif len(A_ ) == 1 and "candidate_labels" not in kwargs:
UpperCamelCase = args[0]
else:
raise ValueError(F'''Unable to understand extra arguments {args}''' )
return super().__call__(A_ , **A_ )
def UpperCAmelCase_ ( self , A_ , A_=None , A_="This example is {}." )-> List[str]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self._args_parser(A_ , A_ , A_ )
for i, (candidate_label, sequence_pair) in enumerate(zip(A_ , A_ ) ):
UpperCamelCase = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(A_ ) - 1,
**model_input,
}
def UpperCAmelCase_ ( self , A_ )-> int:
'''simple docstring'''
UpperCamelCase = inputs['candidate_label']
UpperCamelCase = inputs['sequence']
UpperCamelCase = {k: inputs[k] for k in self.tokenizer.model_input_names}
UpperCamelCase = self.model(**A_ )
UpperCamelCase = {
'candidate_label': candidate_label,
'sequence': sequence,
'is_last': inputs['is_last'],
**outputs,
}
return model_outputs
def UpperCAmelCase_ ( self , A_ , A_=False )-> List[str]:
'''simple docstring'''
UpperCamelCase = [outputs['candidate_label'] for outputs in model_outputs]
UpperCamelCase = [outputs['sequence'] for outputs in model_outputs]
UpperCamelCase = np.concatenate([output['logits'].numpy() for output in model_outputs] )
UpperCamelCase = logits.shape[0]
UpperCamelCase = len(A_ )
UpperCamelCase = N // n
UpperCamelCase = logits.reshape((num_sequences, n, -1) )
if multi_label or len(A_ ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
UpperCamelCase = self.entailment_id
UpperCamelCase = -1 if entailment_id == 0 else 0
UpperCamelCase = reshaped_outputs[..., [contradiction_id, entailment_id]]
UpperCamelCase = np.exp(A_ ) / np.exp(A_ ).sum(-1 , keepdims=A_ )
UpperCamelCase = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
UpperCamelCase = reshaped_outputs[..., self.entailment_id]
UpperCamelCase = np.exp(A_ ) / np.exp(A_ ).sum(-1 , keepdims=A_ )
UpperCamelCase = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
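# Hedged usage sketch for the zero-shot pipeline above (guarded so it only runs when
# executed directly). The NLI checkpoint name is a common public choice, not mandated
# by this file, and running this downloads the model.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    result = classifier(
        "one day I will see the world",
        candidate_labels=["travel", "cooking", "dancing"],
        hypothesis_template="This example is {}.",
    )
    print(result["labels"][0], round(result["scores"][0], 3))  # highest-scoring label first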
| 432 | 1 |
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    '''simple docstring'''
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"""{k}</w>"""]
        d2[k] = d[k]  # restore
    return d2
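# Quick hedged illustration of rewrite_dict_keys (the input values are made up): BPE
# continuation markers "@@" are stripped, word-final tokens gain "</w>", and the four
# special tokens are restored verbatim.
#
#   rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 4, "er": 5})
#   -> {"le": 4, "er</w>": 5, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}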
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
'''simple docstring'''
assert os.path.exists(SCREAMING_SNAKE_CASE_ )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
print(F"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
a : Tuple = basename(SCREAMING_SNAKE_CASE_ )
a : Any = dirname(SCREAMING_SNAKE_CASE_ )
a : List[Any] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
a : str = cls.hub_models()
a : int = {"bpe": "fastbpe", "tokenizer": "moses"}
a : List[str] = "."
    # note: the model dump is old; fairseq has since upgraded its model format, and it
    # does a whole lot of rewrites and splits on the saved weights, therefore we can't
    # use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F"""using checkpoint {checkpoint_file}""" )
a : Optional[Any] = hub_utils.from_pretrained(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , archive_map=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
a : Any = vars(chkpt["args"]["model"] )
a : Optional[Any] = args["source_lang"]
a : Optional[int] = args["target_lang"]
a : Optional[int] = dirname(SCREAMING_SNAKE_CASE_ )
a : str = basename(SCREAMING_SNAKE_CASE_ )
# dicts
a : Tuple = os.path.join(SCREAMING_SNAKE_CASE_ , F"""dict.{src_lang}.txt""" )
a : List[str] = os.path.join(SCREAMING_SNAKE_CASE_ , F"""dict.{tgt_lang}.txt""" )
a : List[str] = Dictionary.load(SCREAMING_SNAKE_CASE_ )
a : Any = rewrite_dict_keys(src_dict.indices )
a : int = len(SCREAMING_SNAKE_CASE_ )
a : int = os.path.join(SCREAMING_SNAKE_CASE_ , "vocab-src.json" )
print(F"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ , indent=SCREAMING_SNAKE_CASE_ ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
a : Any = True
for k in src_vocab.keys():
if not k.islower():
a : int = False
break
a : Tuple = Dictionary.load(SCREAMING_SNAKE_CASE_ )
a : Any = rewrite_dict_keys(tgt_dict.indices )
a : str = len(SCREAMING_SNAKE_CASE_ )
a : Tuple = os.path.join(SCREAMING_SNAKE_CASE_ , "vocab-tgt.json" )
print(F"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ , indent=SCREAMING_SNAKE_CASE_ ) )
# merges_file (bpecodes)
a : List[Any] = os.path.join(SCREAMING_SNAKE_CASE_ , VOCAB_FILES_NAMES["merges_file"] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
a : int = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if os.path.exists(SCREAMING_SNAKE_CASE_ ):
break
with open(SCREAMING_SNAKE_CASE_ , encoding="utf-8" ) as fin:
a : Optional[int] = fin.read()
a : Union[str, Any] = re.sub(R" \d+$" , "" , SCREAMING_SNAKE_CASE_ , 0 , re.M ) # remove frequency number
print(F"""Generating {merges_file}""" )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as fout:
fout.write(SCREAMING_SNAKE_CASE_ )
# model config
a : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE_ , "config.json" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", F"""need to extend tokenizer to support bpe={args['tokenizer']}"""
a : Optional[int] = {
"architectures": ["FSMTForConditionalGeneration"],
"model_type": "fsmt",
"activation_dropout": args["activation_dropout"],
"activation_function": "relu",
"attention_dropout": args["attention_dropout"],
"d_model": args["decoder_embed_dim"],
"dropout": args["dropout"],
"init_std": 0.02,
"max_position_embeddings": args["max_source_positions"],
"num_hidden_layers": args["encoder_layers"],
"src_vocab_size": src_vocab_size,
"tgt_vocab_size": tgt_vocab_size,
"langs": [src_lang, tgt_lang],
"encoder_attention_heads": args["encoder_attention_heads"],
"encoder_ffn_dim": args["encoder_ffn_embed_dim"],
"encoder_layerdrop": args["encoder_layerdrop"],
"encoder_layers": args["encoder_layers"],
"decoder_attention_heads": args["decoder_attention_heads"],
"decoder_ffn_dim": args["decoder_ffn_embed_dim"],
"decoder_layerdrop": args["decoder_layerdrop"],
"decoder_layers": args["decoder_layers"],
"bos_token_id": 0,
"pad_token_id": 1,
"eos_token_id": 2,
"is_encoder_decoder": True,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_all_embeddings"],
}
# good hparam defaults to start with
a : Union[str, Any] = 5
a : Dict = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
a : List[Any] = best_score_hparams[model_dir]["length_penalty"]
else:
a : Optional[int] = 1.0
print(F"""Generating {fsmt_model_config_file}""" )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ , indent=SCREAMING_SNAKE_CASE_ ) )
# tokenizer config
a : Tuple = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
a : int = {
"langs": [src_lang, tgt_lang],
"model_max_length": 1024,
"do_lower_case": do_lower_case,
}
print(F"""Generating {fsmt_tokenizer_config_file}""" )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ , indent=SCREAMING_SNAKE_CASE_ ) )
# model
a : Any = chkpt["models"][0]
a : List[Any] = model.state_dict()
# rename keys to start with 'model.'
a : Union[str, Any] = OrderedDict(("model." + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
a : int = [
"model.model",
"model.encoder.version",
"model.decoder.version",
"model.encoder_embed_tokens.weight",
"model.decoder_embed_tokens.weight",
"model.encoder.embed_positions._float_tensor",
"model.decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
model_state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
a : Any = FSMTConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
a : str = FSMTForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
# check that it loads ok
model_new.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
# save
a : List[Any] = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(F"""Generating {pytorch_weights_dump_path}""" )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print("Conversion is done!" )
print("\nLast step is to upload the files to s3" )
print(F"""cd {data_root}""" )
print(F"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a : List[Any] = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
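# Hedged example invocation of this conversion script (the script filename and the
# paths below are placeholders, not values taken from this file):
#
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path ./wmt19.ru-en.ensemble/model4.pt \
#       --pytorch_dump_folder_path ./fsmt-wmt19-ru-en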
| 633 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_snake_case = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
lowerCamelCase : Any = os.path.abspath(SCREAMING_SNAKE_CASE_ )
logger.info(f"""Loading PyTorch weights from {pt_path}""" )
lowerCamelCase : Optional[int] = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )
logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
lowerCamelCase : List[str] = convert_pytorch_state_dict_to_flax(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
lowerCamelCase : Dict = convert_pytorch_sharded_state_dict_to_flax(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return flax_state_dict
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix):
    '''simple docstring'''
    def is_key_or_prefix_key_in_dict(key) -> bool:
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0
# layer norm
lowerCamelCase : Optional[Any] = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE_ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
lowerCamelCase : Tuple = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE_ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
lowerCamelCase : Any = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE_ ):
return renamed_pt_tuple_key, pt_tensor
# embedding
lowerCamelCase : List[str] = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE_ ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCamelCase : int = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : int = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCamelCase : str = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : Any = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCamelCase : Optional[int] = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCamelCase : str = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
lowerCamelCase : Optional[int] = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
lowerCamelCase : Optional[Any] = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
lowerCamelCase : List[Any] = pt_tuple_key[-2] + "_v"
if name is not None:
lowerCamelCase : str = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
'''simple docstring'''
lowerCamelCase : Union[str, Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
lowerCamelCase : Optional[int] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
lowerCamelCase : Union[str, Any] = flax_model.params["params"]
else:
lowerCamelCase : int = flax_model.params
lowerCamelCase : List[str] = flatten_dict(SCREAMING_SNAKE_CASE_ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCamelCase : Any = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Optional[Any] = {}
lowerCamelCase : Optional[int] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
lowerCamelCase : Optional[Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase : List[Any] = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
lowerCamelCase : List[str] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase : Union[str, Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCamelCase , lowerCamelCase : List[str] = rename_key_and_reshape_tensor(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# add model prefix if necessary
lowerCamelCase : Union[str, Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase : Union[str, Any] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
lowerCamelCase : Tuple = jnp.asarray(SCREAMING_SNAKE_CASE_ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
continue
# also add unexpected weight so that warning is thrown
lowerCamelCase : List[Any] = jnp.asarray(SCREAMING_SNAKE_CASE_ )
else:
# also add unexpected weight so that warning is thrown
lowerCamelCase : List[Any] = jnp.asarray(SCREAMING_SNAKE_CASE_ )
return unflatten_dict(SCREAMING_SNAKE_CASE_ )
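# Standalone hedged illustration (my addition) of the kernel-transpose rule applied in
# the renaming above: PyTorch Linear weights are stored as (out_features, in_features),
# while Flax Dense kernels are (in_features, out_features).
import numpy as np

pt_weight = np.arange(6, dtype=np.float32).reshape(2, 3)  # PyTorch layout: (out, in)
flax_kernel = pt_weight.T                                 # Flax layout: (in, out)
assert flax_kernel.shape == (3, 2)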
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
'''simple docstring'''
import torch
# Load the index
lowerCamelCase : str = {}
for shard_file in shard_filenames:
        # load each shard checkpoint (a regular PyTorch pickle) with torch.load
lowerCamelCase : str = torch.load(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Tuple = {k: v.numpy() for k, v in pt_state_dict.items()}
lowerCamelCase : str = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCamelCase : Optional[int] = flax_model.params["params"]
lowerCamelCase : Any = flatten_dict(SCREAMING_SNAKE_CASE_ )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
lowerCamelCase : Optional[Any] = flax_model.params
lowerCamelCase : Tuple = flatten_dict(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Any = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
lowerCamelCase : Union[str, Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase : List[Any] = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
lowerCamelCase : Optional[Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase : int = pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCamelCase , lowerCamelCase : str = rename_key_and_reshape_tensor(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# add model prefix if necessary
lowerCamelCase : Optional[int] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase : List[str] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
lowerCamelCase : str = jnp.asarray(SCREAMING_SNAKE_CASE_ )
continue
if "var" in flax_key[-1]:
lowerCamelCase : Dict = jnp.asarray(SCREAMING_SNAKE_CASE_ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
continue
# also add unexpected weight so that warning is thrown
lowerCamelCase : Optional[Any] = jnp.asarray(SCREAMING_SNAKE_CASE_ )
else:
# also add unexpected weight so that warning is thrown
lowerCamelCase : Optional[int] = jnp.asarray(SCREAMING_SNAKE_CASE_ )
return unflatten_dict(SCREAMING_SNAKE_CASE_ )
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
'''simple docstring'''
lowerCamelCase : List[str] = os.path.abspath(SCREAMING_SNAKE_CASE_ )
logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""" )
# import correct flax class
lowerCamelCase : str = getattr(SCREAMING_SNAKE_CASE_ , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(SCREAMING_SNAKE_CASE_ , "rb" ) as state_f:
try:
lowerCamelCase : Any = from_bytes(SCREAMING_SNAKE_CASE_ , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model." )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state )
lowerCamelCase : int = flatten_dict(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Dict = pt_model.state_dict()
lowerCamelCase : Optional[Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
lowerCamelCase : Any = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
lowerCamelCase : Optional[int] = []
lowerCamelCase : Tuple = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)
    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)
    if len(unexpected_keys) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
    if len(missing_keys) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
" use it for predictions and inference." )
else:
logger.warning(
f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
"If your task is similar to the task the model of the checkpoint was trained on, "
f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
| 340 | 0 |
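# A minimal usage sketch for the weight-conversion helpers above, relying only
# on the public transformers `from_pretrained` entry points (which call into
# this module internally); "bert-base-uncased" is just an example checkpoint
# that ships both PyTorch and Flax weights, and both libraries must be
# installed for the cross-framework loads to work.
from transformers import BertModel, FlaxBertModel

# PyTorch -> Flax: the torch state dict is converted to Flax params
# (kernel transposes, batch-stats handling, base-model prefix juggling).
flax_model = FlaxBertModel.from_pretrained("bert-base-uncased", from_pt=True)

# Flax -> PyTorch: a Flax checkpoint is loaded into a torch state dict, with
# warnings for missing/unexpected keys as implemented above.
pt_model = BertModel.from_pretrained("bert-base-uncased", from_flax=True)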
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCamelCase__ ( unittest.TestCase ):
@property
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCamelCase :int =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
_UpperCamelCase :List[Any] =self.dummy_uncond_unet
_UpperCamelCase :List[Any] =ScoreSdeVeScheduler()
_UpperCamelCase :int =ScoreSdeVePipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
sde_ve.to(lowerCAmelCase__ )
sde_ve.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase :Union[str, Any] =torch.manual_seed(0 )
_UpperCamelCase :Dict =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=lowerCAmelCase__ ).images
_UpperCamelCase :Optional[int] =torch.manual_seed(0 )
_UpperCamelCase :List[Any] =sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )[
0
]
_UpperCamelCase :str =image[0, -3:, -3:, -1]
_UpperCamelCase :Optional[Any] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase :List[str] =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase :List[Any] ="""google/ncsnpp-church-256"""
_UpperCamelCase :Tuple =UNetaDModel.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase :int =ScoreSdeVeScheduler.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase :List[Any] =ScoreSdeVePipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
sde_ve.to(lowerCAmelCase__ )
sde_ve.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase :Tuple =torch.manual_seed(0 )
_UpperCamelCase :Optional[int] =sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=lowerCAmelCase__ ).images
_UpperCamelCase :Any =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_UpperCamelCase :List[str] =np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 701 | '''simple docstring'''
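# A minimal sketch mirroring the slow test above, using the public diffusers
# API; the checkpoint name comes from the test and the step count is kept low
# for a quick smoke run.
import torch
from diffusers import ScoreSdeVePipeline

sde_ve = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
sde_ve.to("cuda" if torch.cuda.is_available() else "cpu")

generator = torch.manual_seed(0)
image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images
print(image.shape)  # (1, 256, 256, 3) for this checkpoint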
_lowerCamelCase : Union[str, Any] = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor | 512 | 0 |
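# The try/except blocks above all follow one guard pattern: probe for the
# optional backend and fall back to a dummy module when it is missing. A
# minimal sketch (assumed shape -- the real dummy modules are auto-generated)
# of what such a dummy object looks like:
class LMSDiscreteScheduler:  # placeholder mirroring the real class name
    def __init__(self, *args, **kwargs):
        raise ImportError(
            "LMSDiscreteScheduler requires the scipy library, which was not found in your environment."
        )

# This way `import diffusers` always succeeds, and the ImportError is deferred
# until a symbol from the missing backend is actually instantiated.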
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample
    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)
    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of the SDE
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
    def __len__(self):
return self.config.num_train_timesteps
| 652 |
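# A minimal sketch of the predictor-corrector sampling loop this scheduler is
# built for, assuming the upstream diffusers names (set_timesteps, set_sigmas,
# step_correct, step_pred) restored above; the checkpoint name is only an
# example score network and the step count is kept small.
import torch
from diffusers import ScoreSdeVeScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ncsnpp-church-256")  # example checkpoint
scheduler = ScoreSdeVeScheduler()
scheduler.set_timesteps(num_inference_steps=20)
scheduler.set_sigmas(num_inference_steps=20)

sample = torch.randn(1, 3, 256, 256) * scheduler.init_noise_sigma
for i, t in enumerate(scheduler.timesteps):
    sigma_t = scheduler.sigmas[i] * torch.ones(sample.shape[0])
    # corrector: Langevin MCMC steps at a fixed noise level
    for _ in range(scheduler.config.correct_steps):
        model_output = unet(sample, sigma_t).sample
        sample = scheduler.step_correct(model_output, sample).prev_sample
    # predictor: one reverse-SDE step down the noise schedule
    model_output = unet(sample, sigma_t).sample
    output = scheduler.step_pred(model_output, t, sample)
    sample, sample_mean = output.prev_sample, output.prev_sample_mean
image = sample_mean.clamp(0, 1)  # the mean of the last step is the final image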
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Union[List[PIL.Image.Image], np.ndarray]
_UpperCAmelCase : Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 652 | 1 |
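# A short hedged example of how a BaseOutput-style dataclass like the one
# above behaves (the class and field names here are illustrative, not from
# this file): fields are reachable by attribute, by key, and by position, and
# fields that are None are dropped when the output is treated as a tuple.
from dataclasses import dataclass
from typing import List, Optional

import numpy as np
from diffusers.utils import BaseOutput


@dataclass
class ToyPipelineOutput(BaseOutput):
    images: np.ndarray
    nsfw_content_detected: Optional[List[bool]] = None


out = ToyPipelineOutput(images=np.zeros((1, 8, 8, 3)))
print(out.images.shape)     # attribute access
print(out["images"].shape)  # dict-style access
print(out[0].shape)         # tuple-style access; None fields are skipped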
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def UpperCamelCase () -> Any:
A__ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"""-m""" , """--pretrained_model_name_or_path""" , type=lowercase_ , default=lowercase_ , required=lowercase_ , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , )
parser.add_argument(
"""-c""" , """--caption""" , type=lowercase_ , default="""robotic cat with wings""" , help="""Text used to generate images.""" , )
parser.add_argument(
"""-n""" , """--images_num""" , type=lowercase_ , default=4 , help="""How much images to generate.""" , )
parser.add_argument(
"""-s""" , """--seed""" , type=lowercase_ , default=42 , help="""Seed for random process.""" , )
parser.add_argument(
"""-ci""" , """--cuda_id""" , type=lowercase_ , default=0 , help="""cuda_id.""" , )
A__ : Optional[int] = parser.parse_args()
return args
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: int , lowercase_: Union[str, Any] ) -> Tuple:
if not len(lowercase_ ) == rows * cols:
raise ValueError("""The specified number of rows and columns are not correct.""" )
A__ , A__ : Union[str, Any] = imgs[0].size
A__ : Optional[int] = Image.new("""RGB""" , size=(cols * w, rows * h) )
A__ , A__ : Tuple = grid.size
for i, img in enumerate(lowercase_ ):
grid.paste(lowercase_ , box=(i % cols * w, i // cols * h) )
return grid
def UpperCamelCase (lowercase_: str , lowercase_: Optional[Any]="robotic cat with wings" , lowercase_: Tuple=7.5 , lowercase_: Dict=50 , lowercase_: Optional[Any]=1 , lowercase_: Dict=42 , ) -> int:
A__ : List[str] = torch.Generator(pipeline.device ).manual_seed(lowercase_ )
A__ : int = pipeline(
lowercase_ , guidance_scale=lowercase_ , num_inference_steps=lowercase_ , generator=lowercase_ , num_images_per_prompt=lowercase_ , ).images
A__ : Dict = int(math.sqrt(lowercase_ ) )
A__ : Optional[int] = image_grid(lowercase_ , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
A_ : List[str] = parse_args()
# Load models and create wrapper for stable diffusion
A_ : List[str] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
A_ : List[str] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
A_ : Dict = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
A_ : int = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
A_ : Dict = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
A_ : str = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
A_ : int = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, 'unet', unet)
else:
A_ : Union[str, Any] = unet.to(torch.device('cuda', args.cuda_id))
A_ : int = pipeline.to(unet.device)
A_ , A_ : List[str] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
A_ : Optional[Any] = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 64 |
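# A quick hedged example of the grid helper defined above, written against its
# upstream name `image_grid` (the obfuscated dump reuses `UpperCamelCase` for
# every function in this row, so the name is an assumption):
from PIL import Image

imgs = [Image.new("RGB", (64, 64), color=(i * 60, 30, 30)) for i in range(4)]
grid = image_grid(imgs, rows=2, cols=2)
grid.save("grid.png")  # a 128x128 canvas with the four tiles pasted in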
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
A_ : Dict = random.Random()
if is_torch_available():
import torch
def UpperCamelCase (lowercase_: Tuple , lowercase_: Tuple=1.0 , lowercase_: Dict=None , lowercase_: int=None ) -> str:
if rng is None:
A__ : Optional[Any] = global_rng
A__ : List[str] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class _a (unittest.TestCase ):
'''simple docstring'''
def __init__( self , A__ , A__=7 , A__=400 , A__=2000 , A__=1 , A__=0.0 , A__=1_6000 , A__=True , A__=True , ):
A__ : Any = parent
A__ : Optional[int] = batch_size
A__ : Union[str, Any] = min_seq_length
A__ : Dict = max_seq_length
A__ : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A__ : str = feature_size
A__ : Optional[int] = padding_value
A__ : List[str] = sampling_rate
A__ : List[str] = return_attention_mask
A__ : int = do_normalize
def __A ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __A ( self , A__=False , A__=False ):
def _flatten(A__ ):
return list(itertools.chain(*A__ ) )
if equal_length:
A__ : Dict = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
A__ : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A__ : Optional[int] = [np.asarray(A__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: int = ASTFeatureExtractor
def __A ( self ):
A__ : Optional[Any] = ASTFeatureExtractionTester(self )
def __A ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
A__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
A__ : Optional[Any] = [np.asarray(A__ ) for speech_input in speech_inputs]
# Test not batched input
A__ : Tuple = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
A__ : Tuple = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
# Test batched
A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
A__ : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A__ : List[str] = np.asarray(A__ )
A__ : Union[str, Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
A__ : Optional[Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
@require_torch
def __A ( self ):
import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ).astype(np.float32 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def __A ( self , A__ ):
from datasets import load_dataset
A__ : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
A__ : str = ds.sort("""id""" ).select(range(A__ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
@require_torch
def __A ( self ):
# fmt: off
A__ : Optional[Any] = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
# fmt: on
A__ : Any = self._load_datasamples(1 )
A__ : Tuple = ASTFeatureExtractor()
A__ : Dict = feature_extractor(A__ , return_tensors="""pt""" ).input_values
self.assertEquals(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , A__ , atol=1e-4 ) )
| 64 | 1 |
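# A minimal sketch, using the public transformers API exercised by the tests
# above; random audio stands in for a real 16 kHz waveform.
import numpy as np
from transformers import ASTFeatureExtractor

feature_extractor = ASTFeatureExtractor()
audio = np.random.randn(16_000).astype(np.float32)  # 1 second at 16 kHz
inputs = feature_extractor(audio, sampling_rate=16_000, return_tensors="pt")
print(inputs.input_values.shape)  # (1, 1024, 128): padded frames x mel bins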